1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally ported from the -rt patch by:
9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code in the latency_tracer, that is:
12 *
13 * Copyright (C) 2004-2006 Ingo Molnar
14 * Copyright (C) 2004 Nadia Yvette Chambers
15 */
16
17#include <linux/stop_machine.h>
18#include <linux/clocksource.h>
19#include <linux/sched/task.h>
20#include <linux/kallsyms.h>
21#include <linux/seq_file.h>
22#include <linux/tracefs.h>
23#include <linux/hardirq.h>
24#include <linux/kthread.h>
25#include <linux/uaccess.h>
26#include <linux/bsearch.h>
27#include <linux/module.h>
28#include <linux/ftrace.h>
29#include <linux/sysctl.h>
30#include <linux/slab.h>
31#include <linux/ctype.h>
32#include <linux/sort.h>
33#include <linux/list.h>
34#include <linux/hash.h>
35#include <linux/rcupdate.h>
36
37#include <trace/events/sched.h>
38
39#include <asm/sections.h>
40#include <asm/setup.h>
41
42#include "ftrace_internal.h"
43#include "trace_output.h"
44#include "trace_stat.h"
45
46#define FTRACE_WARN_ON(cond) \
47 ({ \
48 int ___r = cond; \
49 if (WARN_ON(___r)) \
50 ftrace_kill(); \
51 ___r; \
52 })
53
54#define FTRACE_WARN_ON_ONCE(cond) \
55 ({ \
56 int ___r = cond; \
57 if (WARN_ON_ONCE(___r)) \
58 ftrace_kill(); \
59 ___r; \
60 })
61
62/* hash bits for specific function selection */
63#define FTRACE_HASH_BITS 7
64#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
65#define FTRACE_HASH_DEFAULT_BITS 10
66#define FTRACE_HASH_MAX_BITS 12
67
68#ifdef CONFIG_DYNAMIC_FTRACE
69#define INIT_OPS_HASH(opsname) \
70 .func_hash = &opsname.local_hash, \
71 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
72#define ASSIGN_OPS_HASH(opsname, val) \
73 .func_hash = val, \
74 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
75#else
76#define INIT_OPS_HASH(opsname)
77#define ASSIGN_OPS_HASH(opsname, val)
78#endif
79
80enum {
81 FTRACE_MODIFY_ENABLE_FL = (1 << 0),
82 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
83};
84
85struct ftrace_ops ftrace_list_end __read_mostly = {
86 .func = ftrace_stub,
87 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
88 INIT_OPS_HASH(ftrace_list_end)
89};
90
91/* ftrace_enabled is a method to turn ftrace on or off */
92int ftrace_enabled __read_mostly;
93static int last_ftrace_enabled;
94
95/* Current function tracing op */
96struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
97/* What to set function_trace_op to */
98static struct ftrace_ops *set_function_trace_op;
99
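/*
 * Return true if this ops does PID filtering and its trace instance
 * currently has a list of PIDs to follow.
 */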
100static bool ftrace_pids_enabled(struct ftrace_ops *ops)
101{
102 struct trace_array *tr;
103
104 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
105 return false;
106
107 tr = ops->private;
108
109 return tr->function_pids != NULL;
110}
111
112static void ftrace_update_trampoline(struct ftrace_ops *ops);
113
114/*
115 * ftrace_disabled is set when an anomaly is discovered.
116 * ftrace_disabled is much stronger than ftrace_enabled.
117 */
118static int ftrace_disabled __read_mostly;
119
120DEFINE_MUTEX(ftrace_lock);
121
122struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
123ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
124struct ftrace_ops global_ops;
125
126#if ARCH_SUPPORTS_FTRACE_OPS
127static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
128 struct ftrace_ops *op, struct pt_regs *regs);
129#else
130/* See comment below, where ftrace_ops_list_func is defined */
131static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
132#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
133#endif
134
135static inline void ftrace_ops_init(struct ftrace_ops *ops)
136{
137#ifdef CONFIG_DYNAMIC_FTRACE
138 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
139 mutex_init(&ops->local_hash.regex_lock);
140 ops->func_hash = &ops->local_hash;
141 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
142 }
143#endif
144}
145
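/*
 * Callback used in place of ops->func while PID filtering is active:
 * skip the call if this CPU is currently ignoring the running task,
 * otherwise forward to the saved callback.
 */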
146static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
147 struct ftrace_ops *op, struct pt_regs *regs)
148{
149 struct trace_array *tr = op->private;
150
151 if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
152 return;
153
154 op->saved_func(ip, parent_ip, op, regs);
155}
156
157static void ftrace_sync(struct work_struct *work)
158{
159 /*
160 * This function is just a stub to implement a hard force
161 * of synchronize_rcu(). This requires synchronizing
162 * tasks even in userspace and idle.
163 *
164 * Yes, function tracing is rude.
165 */
166}
167
168static void ftrace_sync_ipi(void *data)
169{
170 /* Probably not needed, but do it anyway */
171 smp_rmb();
172}
173
174static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
175{
176 /*
177 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
178 * then it needs to call the list anyway.
179 */
180 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
181 FTRACE_FORCE_LIST_FUNC)
182 return ftrace_ops_list_func;
183
184 return ftrace_ops_get_func(ops);
185}
186
187static void update_ftrace_function(void)
188{
189 ftrace_func_t func;
190
191 /*
192 * Prepare the ftrace_ops that the arch callback will use.
193 * If there's only one ftrace_ops registered, the ftrace_ops_list
194 * will point to the ops we want.
195 */
196 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
197 lockdep_is_held(&ftrace_lock));
198
199 /* If there's no ftrace_ops registered, just call the stub function */
200 if (set_function_trace_op == &ftrace_list_end) {
201 func = ftrace_stub;
202
203 /*
204 * If we are at the end of the list and this ops is
205 * recursion safe and not dynamic and the arch supports passing ops,
206 * then have the mcount trampoline call the function directly.
207 */
208 } else if (rcu_dereference_protected(ftrace_ops_list->next,
209 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
210 func = ftrace_ops_get_list_func(ftrace_ops_list);
211
212 } else {
213 /* Just use the default ftrace_ops */
214 set_function_trace_op = &ftrace_list_end;
215 func = ftrace_ops_list_func;
216 }
217
218 update_function_graph_func();
219
220 /* If there's no change, then do nothing more here */
221 if (ftrace_trace_function == func)
222 return;
223
224 /*
225 * If we are using the list function, it doesn't care
226 * about the function_trace_ops.
227 */
228 if (func == ftrace_ops_list_func) {
229 ftrace_trace_function = func;
230 /*
231 * Don't even bother setting function_trace_ops,
232 * it would be racy to do so anyway.
233 */
234 return;
235 }
236
237#ifndef CONFIG_DYNAMIC_FTRACE
238 /*
239 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
242 * with the setting of the ftrace_trace_function.
243 *
244 * Set the function to the list ops, which will call the
245 * function we want, albeit indirectly, but it handles the
246 * ftrace_ops and doesn't depend on function_trace_op.
247 */
248 ftrace_trace_function = ftrace_ops_list_func;
249 /*
250 * Make sure all CPUs see this. Yes this is slow, but static
251 * tracing is slow and nasty to have enabled.
252 */
253 schedule_on_each_cpu(ftrace_sync);
254 /* Now all cpus are using the list ops. */
255 function_trace_op = set_function_trace_op;
256 /* Make sure the function_trace_op is visible on all CPUs */
257 smp_wmb();
258 /* Nasty way to force a rmb on all cpus */
259 smp_call_function(ftrace_sync_ipi, NULL, 1);
260 /* OK, we are all set to update the ftrace_trace_function now! */
261#endif /* !CONFIG_DYNAMIC_FTRACE */
262
263 ftrace_trace_function = func;
264}
265
266static void add_ftrace_ops(struct ftrace_ops __rcu **list,
267 struct ftrace_ops *ops)
268{
269 rcu_assign_pointer(ops->next, *list);
270
271 /*
272 * We are entering ops into the list but another
273 * CPU might be walking that list. We need to make sure
274 * the ops->next pointer is valid before another CPU sees
275 * the ops pointer included into the list.
276 */
277 rcu_assign_pointer(*list, ops);
278}
279
280static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
281 struct ftrace_ops *ops)
282{
283 struct ftrace_ops **p;
284
285 /*
286 * If we are removing the last function, then simply point
287 * to the ftrace_stub.
288 */
289 if (rcu_dereference_protected(*list,
290 lockdep_is_held(&ftrace_lock)) == ops &&
291 rcu_dereference_protected(ops->next,
292 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
293 *list = &ftrace_list_end;
294 return 0;
295 }
296
297 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
298 if (*p == ops)
299 break;
300
301 if (*p != ops)
302 return -1;
303
304 *p = (*p)->next;
305 return 0;
306}
307
308static void ftrace_update_trampoline(struct ftrace_ops *ops);
309
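/*
 * Add @ops to the global ftrace_ops_list, set up its saved callback and
 * PID handling, and refresh the traced function callback if tracing is
 * enabled.
 */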
310int __register_ftrace_function(struct ftrace_ops *ops)
311{
312 if (ops->flags & FTRACE_OPS_FL_DELETED)
313 return -EINVAL;
314
315 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
316 return -EBUSY;
317
318#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
319 /*
320 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
321 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
322 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
323 */
324 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
325 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
326 return -EINVAL;
327
328 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
329 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
330#endif
331
332 if (!core_kernel_data((unsigned long)ops))
333 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
334
335 add_ftrace_ops(&ftrace_ops_list, ops);
336
337 /* Always save the function, and reset at unregistering */
338 ops->saved_func = ops->func;
339
340 if (ftrace_pids_enabled(ops))
341 ops->func = ftrace_pid_func;
342
343 ftrace_update_trampoline(ops);
344
345 if (ftrace_enabled)
346 update_ftrace_function();
347
348 return 0;
349}
350
351int __unregister_ftrace_function(struct ftrace_ops *ops)
352{
353 int ret;
354
355 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
356 return -EBUSY;
357
358 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
359
360 if (ret < 0)
361 return ret;
362
363 if (ftrace_enabled)
364 update_ftrace_function();
365
366 ops->func = ops->saved_func;
367
368 return 0;
369}
370
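/*
 * A PID filter changed: switch every PID-aware ops between its saved
 * callback and ftrace_pid_func, then update trampolines and the
 * current trace function.
 */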
371static void ftrace_update_pid_func(void)
372{
373 struct ftrace_ops *op;
374
375 /* Only do something if we are tracing something */
376 if (ftrace_trace_function == ftrace_stub)
377 return;
378
379 do_for_each_ftrace_op(op, ftrace_ops_list) {
380 if (op->flags & FTRACE_OPS_FL_PID) {
381 op->func = ftrace_pids_enabled(op) ?
382 ftrace_pid_func : op->saved_func;
383 ftrace_update_trampoline(op);
384 }
385 } while_for_each_ftrace_op(op);
386
387 update_ftrace_function();
388}
389
390#ifdef CONFIG_FUNCTION_PROFILER
391struct ftrace_profile {
392 struct hlist_node node;
393 unsigned long ip;
394 unsigned long counter;
395#ifdef CONFIG_FUNCTION_GRAPH_TRACER
396 unsigned long long time;
397 unsigned long long time_squared;
398#endif
399};
400
401struct ftrace_profile_page {
402 struct ftrace_profile_page *next;
403 unsigned long index;
404 struct ftrace_profile records[];
405};
406
407struct ftrace_profile_stat {
408 atomic_t disabled;
409 struct hlist_head *hash;
410 struct ftrace_profile_page *pages;
411 struct ftrace_profile_page *start;
412 struct tracer_stat stat;
413};
414
415#define PROFILE_RECORDS_SIZE \
416 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
417
418#define PROFILES_PER_PAGE \
419 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
420
421static int ftrace_profile_enabled __read_mostly;
422
423/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
424static DEFINE_MUTEX(ftrace_profile_lock);
425
426static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
427
428#define FTRACE_PROFILE_HASH_BITS 10
429#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
430
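/*
 * Stat iterator callback: return the next profile record, moving on to
 * the next profile page when the current one is exhausted.
 */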
431static void *
432function_stat_next(void *v, int idx)
433{
434 struct ftrace_profile *rec = v;
435 struct ftrace_profile_page *pg;
436
437 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
438
439 again:
440 if (idx != 0)
441 rec++;
442
443 if ((void *)rec >= (void *)&pg->records[pg->index]) {
444 pg = pg->next;
445 if (!pg)
446 return NULL;
447 rec = &pg->records[0];
448 if (!rec->counter)
449 goto again;
450 }
451
452 return rec;
453}
454
455static void *function_stat_start(struct tracer_stat *trace)
456{
457 struct ftrace_profile_stat *stat =
458 container_of(trace, struct ftrace_profile_stat, stat);
459
460 if (!stat || !stat->start)
461 return NULL;
462
463 return function_stat_next(&stat->start->records[0], 0);
464}
465
466#ifdef CONFIG_FUNCTION_GRAPH_TRACER
467/* function graph compares on total time */
468static int function_stat_cmp(void *p1, void *p2)
469{
470 struct ftrace_profile *a = p1;
471 struct ftrace_profile *b = p2;
472
473 if (a->time < b->time)
474 return -1;
475 if (a->time > b->time)
476 return 1;
477 else
478 return 0;
479}
480#else
481/* not function graph compares against hits */
482static int function_stat_cmp(void *p1, void *p2)
483{
484 struct ftrace_profile *a = p1;
485 struct ftrace_profile *b = p2;
486
487 if (a->counter < b->counter)
488 return -1;
489 if (a->counter > b->counter)
490 return 1;
491 else
492 return 0;
493}
494#endif
495
496static int function_stat_headers(struct seq_file *m)
497{
498#ifdef CONFIG_FUNCTION_GRAPH_TRACER
499 seq_puts(m, " Function "
500 "Hit Time Avg s^2\n"
501 " -------- "
502 "--- ---- --- ---\n");
503#else
504 seq_puts(m, " Function Hit\n"
505 " -------- ---\n");
506#endif
507 return 0;
508}
509
510static int function_stat_show(struct seq_file *m, void *v)
511{
512 struct ftrace_profile *rec = v;
513 char str[KSYM_SYMBOL_LEN];
514 int ret = 0;
515#ifdef CONFIG_FUNCTION_GRAPH_TRACER
516 static struct trace_seq s;
517 unsigned long long avg;
518 unsigned long long stddev;
519#endif
520 mutex_lock(&ftrace_profile_lock);
521
522 /* we raced with function_profile_reset() */
523 if (unlikely(rec->counter == 0)) {
524 ret = -EBUSY;
525 goto out;
526 }
527
528#ifdef CONFIG_FUNCTION_GRAPH_TRACER
529 avg = rec->time;
530 do_div(avg, rec->counter);
531 if (tracing_thresh && (avg < tracing_thresh))
532 goto out;
533#endif
534
535 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
536 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
537
538#ifdef CONFIG_FUNCTION_GRAPH_TRACER
539 seq_puts(m, " ");
540
541 /* Sample standard deviation (s^2) */
542 if (rec->counter <= 1)
543 stddev = 0;
544 else {
545 /*
546 * Apply Welford's method:
547 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
548 */
549 stddev = rec->counter * rec->time_squared -
550 rec->time * rec->time;
551
552 /*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
554 * trace_print_graph_duration will divide 1000 again.
555 */
556 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
557 }
558
559 trace_seq_init(&s);
560 trace_print_graph_duration(rec->time, &s);
561 trace_seq_puts(&s, " ");
562 trace_print_graph_duration(avg, &s);
563 trace_seq_puts(&s, " ");
564 trace_print_graph_duration(stddev, &s);
565 trace_print_seq(m, &s);
566#endif
567 seq_putc(m, '\n');
568out:
569 mutex_unlock(&ftrace_profile_lock);
570
571 return ret;
572}
573
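/* Clear all recorded hits and times as well as the hash for one CPU. */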
574static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
575{
576 struct ftrace_profile_page *pg;
577
578 pg = stat->pages = stat->start;
579
580 while (pg) {
581 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
582 pg->index = 0;
583 pg = pg->next;
584 }
585
586 memset(stat->hash, 0,
587 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
588}
589
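/*
 * Preallocate whole pages of profile records, enough to hold one record
 * per traceable function (or a 20K estimate without dynamic ftrace).
 */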
590int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
591{
592 struct ftrace_profile_page *pg;
593 int functions;
594 int pages;
595 int i;
596
597 /* If we already allocated, do nothing */
598 if (stat->pages)
599 return 0;
600
601 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
602 if (!stat->pages)
603 return -ENOMEM;
604
605#ifdef CONFIG_DYNAMIC_FTRACE
606 functions = ftrace_update_tot_cnt;
607#else
608 /*
609 * We do not know the number of functions that exist because
610 * dynamic tracing is what counts them. With past experience
611 * we have around 20K functions. That should be more than enough.
612 * It is highly unlikely we will execute every function in
613 * the kernel.
614 */
615 functions = 20000;
616#endif
617
618 pg = stat->start = stat->pages;
619
620 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
621
622 for (i = 1; i < pages; i++) {
623 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
624 if (!pg->next)
625 goto out_free;
626 pg = pg->next;
627 }
628
629 return 0;
630
631 out_free:
632 pg = stat->start;
633 while (pg) {
634 unsigned long tmp = (unsigned long)pg;
635
636 pg = pg->next;
637 free_page(tmp);
638 }
639
640 stat->pages = NULL;
641 stat->start = NULL;
642
643 return -ENOMEM;
644}
645
646static int ftrace_profile_init_cpu(int cpu)
647{
648 struct ftrace_profile_stat *stat;
649 int size;
650
651 stat = &per_cpu(ftrace_profile_stats, cpu);
652
653 if (stat->hash) {
654 /* If the profile is already created, simply reset it */
655 ftrace_profile_reset(stat);
656 return 0;
657 }
658
659 /*
660 * We are profiling all functions, but usually only a few thousand
661 * functions are hit. We'll make a hash of 1024 items.
662 */
663 size = FTRACE_PROFILE_HASH_SIZE;
664
665 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
666
667 if (!stat->hash)
668 return -ENOMEM;
669
670 /* Preallocate the function profiling pages */
671 if (ftrace_profile_pages_init(stat) < 0) {
672 kfree(stat->hash);
673 stat->hash = NULL;
674 return -ENOMEM;
675 }
676
677 return 0;
678}
679
680static int ftrace_profile_init(void)
681{
682 int cpu;
683 int ret = 0;
684
685 for_each_possible_cpu(cpu) {
686 ret = ftrace_profile_init_cpu(cpu);
687 if (ret)
688 break;
689 }
690
691 return ret;
692}
693
694/* interrupts must be disabled */
695static struct ftrace_profile *
696ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
697{
698 struct ftrace_profile *rec;
699 struct hlist_head *hhd;
700 unsigned long key;
701
702 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
703 hhd = &stat->hash[key];
704
705 if (hlist_empty(hhd))
706 return NULL;
707
708 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
709 if (rec->ip == ip)
710 return rec;
711 }
712
713 return NULL;
714}
715
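/* Insert a new record into the per-CPU profile hash, keyed by its ip. */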
716static void ftrace_add_profile(struct ftrace_profile_stat *stat,
717 struct ftrace_profile *rec)
718{
719 unsigned long key;
720
721 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
722 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
723}
724
725/*
 * The memory is already allocated; this simply finds a new record to use.
727 */
728static struct ftrace_profile *
729ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
730{
731 struct ftrace_profile *rec = NULL;
732
733 /* prevent recursion (from NMIs) */
734 if (atomic_inc_return(&stat->disabled) != 1)
735 goto out;
736
737 /*
738 * Try to find the function again since an NMI
739 * could have added it
740 */
741 rec = ftrace_find_profiled_func(stat, ip);
742 if (rec)
743 goto out;
744
745 if (stat->pages->index == PROFILES_PER_PAGE) {
746 if (!stat->pages->next)
747 goto out;
748 stat->pages = stat->pages->next;
749 }
750
751 rec = &stat->pages->records[stat->pages->index++];
752 rec->ip = ip;
753 ftrace_add_profile(stat, rec);
754
755 out:
756 atomic_dec(&stat->disabled);
757
758 return rec;
759}
760
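/*
 * Profiler entry callback: look up (or allocate) the record for @ip and
 * increment its hit counter, with interrupts disabled.
 */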
761static void
762function_profile_call(unsigned long ip, unsigned long parent_ip,
763 struct ftrace_ops *ops, struct pt_regs *regs)
764{
765 struct ftrace_profile_stat *stat;
766 struct ftrace_profile *rec;
767 unsigned long flags;
768
769 if (!ftrace_profile_enabled)
770 return;
771
772 local_irq_save(flags);
773
774 stat = this_cpu_ptr(&ftrace_profile_stats);
775 if (!stat->hash || !ftrace_profile_enabled)
776 goto out;
777
778 rec = ftrace_find_profiled_func(stat, ip);
779 if (!rec) {
780 rec = ftrace_profile_alloc(stat, ip);
781 if (!rec)
782 goto out;
783 }
784
785 rec->counter++;
786 out:
787 local_irq_restore(flags);
788}
789
790#ifdef CONFIG_FUNCTION_GRAPH_TRACER
791static bool fgraph_graph_time = true;
792
793void ftrace_graph_graph_time_control(bool enable)
794{
795 fgraph_graph_time = enable;
796}
797
798static int profile_graph_entry(struct ftrace_graph_ent *trace)
799{
800 struct ftrace_ret_stack *ret_stack;
801
802 function_profile_call(trace->func, 0, NULL, NULL);
803
804 /* If function graph is shutting down, ret_stack can be NULL */
805 if (!current->ret_stack)
806 return 0;
807
808 ret_stack = ftrace_graph_get_ret_stack(current, 0);
809 if (ret_stack)
810 ret_stack->subtime = 0;
811
812 return 1;
813}
814
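/*
 * Function-graph return callback: charge the call's duration to the
 * matching record. When graph time is off, time spent in children is
 * subtracted so only the function's own time is accumulated.
 */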
815static void profile_graph_return(struct ftrace_graph_ret *trace)
816{
817 struct ftrace_ret_stack *ret_stack;
818 struct ftrace_profile_stat *stat;
819 unsigned long long calltime;
820 struct ftrace_profile *rec;
821 unsigned long flags;
822
823 local_irq_save(flags);
824 stat = this_cpu_ptr(&ftrace_profile_stats);
825 if (!stat->hash || !ftrace_profile_enabled)
826 goto out;
827
828 /* If the calltime was zero'd ignore it */
829 if (!trace->calltime)
830 goto out;
831
832 calltime = trace->rettime - trace->calltime;
833
834 if (!fgraph_graph_time) {
835
836 /* Append this call time to the parent time to subtract */
837 ret_stack = ftrace_graph_get_ret_stack(current, 1);
838 if (ret_stack)
839 ret_stack->subtime += calltime;
840
841 ret_stack = ftrace_graph_get_ret_stack(current, 0);
842 if (ret_stack && ret_stack->subtime < calltime)
843 calltime -= ret_stack->subtime;
844 else
845 calltime = 0;
846 }
847
848 rec = ftrace_find_profiled_func(stat, trace->func);
849 if (rec) {
850 rec->time += calltime;
851 rec->time_squared += calltime * calltime;
852 }
853
854 out:
855 local_irq_restore(flags);
856}
857
858static struct fgraph_ops fprofiler_ops = {
859 .entryfunc = &profile_graph_entry,
860 .retfunc = &profile_graph_return,
861};
862
863static int register_ftrace_profiler(void)
864{
865 return register_ftrace_graph(&fprofiler_ops);
866}
867
868static void unregister_ftrace_profiler(void)
869{
870 unregister_ftrace_graph(&fprofiler_ops);
871}
872#else
873static struct ftrace_ops ftrace_profile_ops __read_mostly = {
874 .func = function_profile_call,
875 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
876 INIT_OPS_HASH(ftrace_profile_ops)
877};
878
879static int register_ftrace_profiler(void)
880{
881 return register_ftrace_function(&ftrace_profile_ops);
882}
883
884static void unregister_ftrace_profiler(void)
885{
886 unregister_ftrace_function(&ftrace_profile_ops);
887}
888#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
889
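/*
 * "function_profile_enabled" write handler: a non-zero value sets up the
 * per-CPU stats and registers the profiler, zero unregisters it.
 */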
890static ssize_t
891ftrace_profile_write(struct file *filp, const char __user *ubuf,
892 size_t cnt, loff_t *ppos)
893{
894 unsigned long val;
895 int ret;
896
897 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
898 if (ret)
899 return ret;
900
901 val = !!val;
902
903 mutex_lock(&ftrace_profile_lock);
904 if (ftrace_profile_enabled ^ val) {
905 if (val) {
906 ret = ftrace_profile_init();
907 if (ret < 0) {
908 cnt = ret;
909 goto out;
910 }
911
912 ret = register_ftrace_profiler();
913 if (ret < 0) {
914 cnt = ret;
915 goto out;
916 }
917 ftrace_profile_enabled = 1;
918 } else {
919 ftrace_profile_enabled = 0;
920 /*
921 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu().
923 */
924 unregister_ftrace_profiler();
925 }
926 }
927 out:
928 mutex_unlock(&ftrace_profile_lock);
929
930 *ppos += cnt;
931
932 return cnt;
933}
934
935static ssize_t
936ftrace_profile_read(struct file *filp, char __user *ubuf,
937 size_t cnt, loff_t *ppos)
938{
939 char buf[64]; /* big enough to hold a number */
940 int r;
941
942 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
943 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
944}
945
946static const struct file_operations ftrace_profile_fops = {
947 .open = tracing_open_generic,
948 .read = ftrace_profile_read,
949 .write = ftrace_profile_write,
950 .llseek = default_llseek,
951};
952
953/* used to initialize the real stat files */
954static struct tracer_stat function_stats __initdata = {
955 .name = "functions",
956 .stat_start = function_stat_start,
957 .stat_next = function_stat_next,
958 .stat_cmp = function_stat_cmp,
959 .stat_headers = function_stat_headers,
960 .stat_show = function_stat_show
961};
962
963static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
964{
965 struct ftrace_profile_stat *stat;
966 struct dentry *entry;
967 char *name;
968 int ret;
969 int cpu;
970
971 for_each_possible_cpu(cpu) {
972 stat = &per_cpu(ftrace_profile_stats, cpu);
973
974 name = kasprintf(GFP_KERNEL, "function%d", cpu);
975 if (!name) {
976 /*
			 * The files created are permanent; if something happens
			 * we still do not free memory.
979 */
980 WARN(1,
981 "Could not allocate stat file for cpu %d\n",
982 cpu);
983 return;
984 }
985 stat->stat = function_stats;
986 stat->stat.name = name;
987 ret = register_stat_tracer(&stat->stat);
988 if (ret) {
989 WARN(1,
990 "Could not register function stat for cpu %d\n",
991 cpu);
992 kfree(name);
993 return;
994 }
995 }
996
997 entry = tracefs_create_file("function_profile_enabled", 0644,
998 d_tracer, NULL, &ftrace_profile_fops);
999 if (!entry)
1000 pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
1001}
1002
1003#else /* CONFIG_FUNCTION_PROFILER */
1004static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1005{
1006}
1007#endif /* CONFIG_FUNCTION_PROFILER */
1008
1009#ifdef CONFIG_DYNAMIC_FTRACE
1010
1011static struct ftrace_ops *removed_ops;
1012
1013/*
1014 * Set when doing a global update, like enabling all recs or disabling them.
1015 * It is not set when just updating a single ftrace_ops.
1016 */
1017static bool update_all_ops;
1018
1019#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1020# error Dynamic ftrace depends on MCOUNT_RECORD
1021#endif
1022
1023struct ftrace_func_entry {
1024 struct hlist_node hlist;
1025 unsigned long ip;
1026};
1027
1028struct ftrace_func_probe {
1029 struct ftrace_probe_ops *probe_ops;
1030 struct ftrace_ops ops;
1031 struct trace_array *tr;
1032 struct list_head list;
1033 void *data;
1034 int ref;
1035};
1036
1037/*
1038 * We make these constant because no one should touch them,
1039 * but they are used as the default "empty hash", to avoid allocating
1040 * it all the time. These are in a read only section such that if
1041 * anyone does try to modify it, it will cause an exception.
1042 */
1043static const struct hlist_head empty_buckets[1];
1044static const struct ftrace_hash empty_hash = {
1045 .buckets = (struct hlist_head *)empty_buckets,
1046};
1047#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1048
1049struct ftrace_ops global_ops = {
1050 .func = ftrace_stub,
1051 .local_hash.notrace_hash = EMPTY_HASH,
1052 .local_hash.filter_hash = EMPTY_HASH,
1053 INIT_OPS_HASH(global_ops)
1054 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1055 FTRACE_OPS_FL_INITIALIZED |
1056 FTRACE_OPS_FL_PID,
1057};
1058
1059/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1061 */
1062struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1063{
1064 struct ftrace_ops *op = NULL;
1065
1066 /*
1067 * Some of the ops may be dynamically allocated,
1068 * they are freed after a synchronize_rcu().
1069 */
1070 preempt_disable_notrace();
1071
1072 do_for_each_ftrace_op(op, ftrace_ops_list) {
1073 /*
1074 * This is to check for dynamically allocated trampolines.
1075 * Trampolines that are in kernel text will have
1076 * core_kernel_text() return true.
1077 */
1078 if (op->trampoline && op->trampoline_size)
1079 if (addr >= op->trampoline &&
1080 addr < op->trampoline + op->trampoline_size) {
1081 preempt_enable_notrace();
1082 return op;
1083 }
1084 } while_for_each_ftrace_op(op);
1085 preempt_enable_notrace();
1086
1087 return NULL;
1088}
1089
1090/*
1091 * This is used by __kernel_text_address() to return true if the
1092 * address is on a dynamically allocated trampoline that would
1093 * not return true for either core_kernel_text() or
1094 * is_module_text_address().
1095 */
1096bool is_ftrace_trampoline(unsigned long addr)
1097{
1098 return ftrace_ops_trampoline(addr) != NULL;
1099}
1100
1101struct ftrace_page {
1102 struct ftrace_page *next;
1103 struct dyn_ftrace *records;
1104 int index;
1105 int size;
1106};
1107
1108#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1109#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1110
1111/* estimate from running different kernels */
1112#define NR_TO_INIT 10000
1113
1114static struct ftrace_page *ftrace_pages_start;
1115static struct ftrace_page *ftrace_pages;
1116
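/* Map an instruction pointer to a bucket index of the given hash. */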
1117static __always_inline unsigned long
1118ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1119{
1120 if (hash->size_bits > 0)
1121 return hash_long(ip, hash->size_bits);
1122
1123 return 0;
1124}
1125
1126/* Only use this function if ftrace_hash_empty() has already been tested */
1127static __always_inline struct ftrace_func_entry *
1128__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1129{
1130 unsigned long key;
1131 struct ftrace_func_entry *entry;
1132 struct hlist_head *hhd;
1133
1134 key = ftrace_hash_key(hash, ip);
1135 hhd = &hash->buckets[key];
1136
1137 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1138 if (entry->ip == ip)
1139 return entry;
1140 }
1141 return NULL;
1142}
1143
1144/**
1145 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1146 * @hash: The hash to look at
1147 * @ip: The instruction pointer to test
1148 *
1149 * Search a given @hash to see if a given instruction pointer (@ip)
1150 * exists in it.
1151 *
1152 * Returns the entry that holds the @ip if found. NULL otherwise.
1153 */
1154struct ftrace_func_entry *
1155ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1156{
1157 if (ftrace_hash_empty(hash))
1158 return NULL;
1159
1160 return __ftrace_lookup_ip(hash, ip);
1161}
1162
1163static void __add_hash_entry(struct ftrace_hash *hash,
1164 struct ftrace_func_entry *entry)
1165{
1166 struct hlist_head *hhd;
1167 unsigned long key;
1168
1169 key = ftrace_hash_key(hash, entry->ip);
1170 hhd = &hash->buckets[key];
1171 hlist_add_head(&entry->hlist, hhd);
1172 hash->count++;
1173}
1174
1175static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1176{
1177 struct ftrace_func_entry *entry;
1178
1179 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1180 if (!entry)
1181 return -ENOMEM;
1182
1183 entry->ip = ip;
1184 __add_hash_entry(hash, entry);
1185
1186 return 0;
1187}
1188
1189static void
1190free_hash_entry(struct ftrace_hash *hash,
1191 struct ftrace_func_entry *entry)
1192{
1193 hlist_del(&entry->hlist);
1194 kfree(entry);
1195 hash->count--;
1196}
1197
1198static void
1199remove_hash_entry(struct ftrace_hash *hash,
1200 struct ftrace_func_entry *entry)
1201{
1202 hlist_del_rcu(&entry->hlist);
1203 hash->count--;
1204}
1205
1206static void ftrace_hash_clear(struct ftrace_hash *hash)
1207{
1208 struct hlist_head *hhd;
1209 struct hlist_node *tn;
1210 struct ftrace_func_entry *entry;
1211 int size = 1 << hash->size_bits;
1212 int i;
1213
1214 if (!hash->count)
1215 return;
1216
1217 for (i = 0; i < size; i++) {
1218 hhd = &hash->buckets[i];
1219 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1220 free_hash_entry(hash, entry);
1221 }
1222 FTRACE_WARN_ON(hash->count);
1223}
1224
1225static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1226{
1227 list_del(&ftrace_mod->list);
1228 kfree(ftrace_mod->module);
1229 kfree(ftrace_mod->func);
1230 kfree(ftrace_mod);
1231}
1232
1233static void clear_ftrace_mod_list(struct list_head *head)
1234{
1235 struct ftrace_mod_load *p, *n;
1236
1237 /* stack tracer isn't supported yet */
1238 if (!head)
1239 return;
1240
1241 mutex_lock(&ftrace_lock);
1242 list_for_each_entry_safe(p, n, head, list)
1243 free_ftrace_mod(p);
1244 mutex_unlock(&ftrace_lock);
1245}
1246
1247static void free_ftrace_hash(struct ftrace_hash *hash)
1248{
1249 if (!hash || hash == EMPTY_HASH)
1250 return;
1251 ftrace_hash_clear(hash);
1252 kfree(hash->buckets);
1253 kfree(hash);
1254}
1255
1256static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1257{
1258 struct ftrace_hash *hash;
1259
1260 hash = container_of(rcu, struct ftrace_hash, rcu);
1261 free_ftrace_hash(hash);
1262}
1263
1264static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1265{
1266 if (!hash || hash == EMPTY_HASH)
1267 return;
1268 call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1269}
1270
1271void ftrace_free_filter(struct ftrace_ops *ops)
1272{
1273 ftrace_ops_init(ops);
1274 free_ftrace_hash(ops->func_hash->filter_hash);
1275 free_ftrace_hash(ops->func_hash->notrace_hash);
1276}
1277
1278static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1279{
1280 struct ftrace_hash *hash;
1281 int size;
1282
1283 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1284 if (!hash)
1285 return NULL;
1286
1287 size = 1 << size_bits;
1288 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1289
1290 if (!hash->buckets) {
1291 kfree(hash);
1292 return NULL;
1293 }
1294
1295 hash->size_bits = size_bits;
1296
1297 return hash;
1298}
1299
1300
1301static int ftrace_add_mod(struct trace_array *tr,
1302 const char *func, const char *module,
1303 int enable)
1304{
1305 struct ftrace_mod_load *ftrace_mod;
1306 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1307
1308 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1309 if (!ftrace_mod)
1310 return -ENOMEM;
1311
1312 ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1313 ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1314 ftrace_mod->enable = enable;
1315
1316 if (!ftrace_mod->func || !ftrace_mod->module)
1317 goto out_free;
1318
1319 list_add(&ftrace_mod->list, mod_head);
1320
1321 return 0;
1322
1323 out_free:
1324 free_ftrace_mod(ftrace_mod);
1325
1326 return -ENOMEM;
1327}
1328
1329static struct ftrace_hash *
1330alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1331{
1332 struct ftrace_func_entry *entry;
1333 struct ftrace_hash *new_hash;
1334 int size;
1335 int ret;
1336 int i;
1337
1338 new_hash = alloc_ftrace_hash(size_bits);
1339 if (!new_hash)
1340 return NULL;
1341
1342 if (hash)
1343 new_hash->flags = hash->flags;
1344
1345 /* Empty hash? */
1346 if (ftrace_hash_empty(hash))
1347 return new_hash;
1348
1349 size = 1 << hash->size_bits;
1350 for (i = 0; i < size; i++) {
1351 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1352 ret = add_hash_entry(new_hash, entry->ip);
1353 if (ret < 0)
1354 goto free_hash;
1355 }
1356 }
1357
1358 FTRACE_WARN_ON(new_hash->count != hash->count);
1359
1360 return new_hash;
1361
1362 free_hash:
1363 free_ftrace_hash(new_hash);
1364 return NULL;
1365}
1366
1367static void
1368ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1369static void
1370ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1371
1372static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1373 struct ftrace_hash *new_hash);
1374
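/*
 * Build a right-sized hash to replace @src, moving its entries rather
 * than copying them.
 */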
1375static struct ftrace_hash *
1376__ftrace_hash_move(struct ftrace_hash *src)
1377{
1378 struct ftrace_func_entry *entry;
1379 struct hlist_node *tn;
1380 struct hlist_head *hhd;
1381 struct ftrace_hash *new_hash;
1382 int size = src->count;
1383 int bits = 0;
1384 int i;
1385
1386 /*
1387 * If the new source is empty, just return the empty_hash.
1388 */
1389 if (ftrace_hash_empty(src))
1390 return EMPTY_HASH;
1391
1392 /*
1393 * Make the hash size about 1/2 the # found
1394 */
1395 for (size /= 2; size; size >>= 1)
1396 bits++;
1397
1398 /* Don't allocate too much */
1399 if (bits > FTRACE_HASH_MAX_BITS)
1400 bits = FTRACE_HASH_MAX_BITS;
1401
1402 new_hash = alloc_ftrace_hash(bits);
1403 if (!new_hash)
1404 return NULL;
1405
1406 new_hash->flags = src->flags;
1407
1408 size = 1 << src->size_bits;
1409 for (i = 0; i < size; i++) {
1410 hhd = &src->buckets[i];
1411 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1412 remove_hash_entry(src, entry);
1413 __add_hash_entry(new_hash, entry);
1414 }
1415 }
1416
1417 return new_hash;
1418}
1419
1420static int
1421ftrace_hash_move(struct ftrace_ops *ops, int enable,
1422 struct ftrace_hash **dst, struct ftrace_hash *src)
1423{
1424 struct ftrace_hash *new_hash;
1425 int ret;
1426
1427 /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1428 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1429 return -EINVAL;
1430
1431 new_hash = __ftrace_hash_move(src);
1432 if (!new_hash)
1433 return -ENOMEM;
1434
1435 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1436 if (enable) {
1437 /* IPMODIFY should be updated only when filter_hash updating */
1438 ret = ftrace_hash_ipmodify_update(ops, new_hash);
1439 if (ret < 0) {
1440 free_ftrace_hash(new_hash);
1441 return ret;
1442 }
1443 }
1444
1445 /*
1446 * Remove the current set, update the hash and add
1447 * them back.
1448 */
1449 ftrace_hash_rec_disable_modify(ops, enable);
1450
1451 rcu_assign_pointer(*dst, new_hash);
1452
1453 ftrace_hash_rec_enable_modify(ops, enable);
1454
1455 return 0;
1456}
1457
1458static bool hash_contains_ip(unsigned long ip,
1459 struct ftrace_ops_hash *hash)
1460{
1461 /*
1462 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
1464 * considered a match for the filter hash, but an empty
1465 * notrace hash is considered not in the notrace hash.
1466 */
1467 return (ftrace_hash_empty(hash->filter_hash) ||
1468 __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1469 (ftrace_hash_empty(hash->notrace_hash) ||
1470 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1471}
1472
1473/*
1474 * Test the hashes for this ops to see if we want to call
1475 * the ops->func or not.
1476 *
1477 * It's a match if the ip is in the ops->filter_hash or
1478 * the filter_hash does not exist or is empty,
1479 * AND
1480 * the ip is not in the ops->notrace_hash.
1481 *
1482 * This needs to be called with preemption disabled as
1483 * the hashes are freed with call_rcu().
1484 */
1485int
1486ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1487{
1488 struct ftrace_ops_hash hash;
1489 int ret;
1490
1491#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1492 /*
1493 * There's a small race when adding ops that the ftrace handler
1494 * that wants regs, may be called without them. We can not
1495 * allow that handler to be called if regs is NULL.
1496 */
1497 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1498 return 0;
1499#endif
1500
1501 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1502 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1503
1504 if (hash_contains_ip(ip, &hash))
1505 ret = 1;
1506 else
1507 ret = 0;
1508
1509 return ret;
1510}
1511
1512/*
1513 * This is a double for. Do not use 'break' to break out of the loop,
1514 * you must use a goto.
1515 */
1516#define do_for_each_ftrace_rec(pg, rec) \
1517 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1518 int _____i; \
1519 for (_____i = 0; _____i < pg->index; _____i++) { \
1520 rec = &pg->records[_____i];
1521
1522#define while_for_each_ftrace_rec() \
1523 } \
1524 }
1525
1526
1527static int ftrace_cmp_recs(const void *a, const void *b)
1528{
1529 const struct dyn_ftrace *key = a;
1530 const struct dyn_ftrace *rec = b;
1531
1532 if (key->flags < rec->ip)
1533 return -1;
1534 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1535 return 1;
1536 return 0;
1537}
1538
1539/**
1540 * ftrace_location_range - return the first address of a traced location
1541 * if it touches the given ip range
1542 * @start: start of range to search.
1543 * @end: end of range to search (inclusive). @end points to the last byte
1544 * to check.
1545 *
 * Returns rec->ip if the related ftrace location is at least partly within
1547 * the given address range. That is, the first address of the instruction
1548 * that is either a NOP or call to the function tracer. It checks the ftrace
1549 * internal tables to determine if the address belongs or not.
1550 */
1551unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1552{
1553 struct ftrace_page *pg;
1554 struct dyn_ftrace *rec;
1555 struct dyn_ftrace key;
1556
1557 key.ip = start;
1558 key.flags = end; /* overload flags, as it is unsigned long */
1559
1560 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1561 if (end < pg->records[0].ip ||
1562 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1563 continue;
1564 rec = bsearch(&key, pg->records, pg->index,
1565 sizeof(struct dyn_ftrace),
1566 ftrace_cmp_recs);
1567 if (rec)
1568 return rec->ip;
1569 }
1570
1571 return 0;
1572}
1573
1574/**
 * ftrace_location - return true if the ip given is a traced location
1576 * @ip: the instruction pointer to check
1577 *
1578 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1579 * That is, the instruction that is either a NOP or call to
1580 * the function tracer. It checks the ftrace internal tables to
1581 * determine if the address belongs or not.
1582 */
1583unsigned long ftrace_location(unsigned long ip)
1584{
1585 return ftrace_location_range(ip, ip);
1586}
1587
1588/**
1589 * ftrace_text_reserved - return true if range contains an ftrace location
1590 * @start: start of range to search
1591 * @end: end of range to search (inclusive). @end points to the last byte to check.
1592 *
 * Returns 1 if @start and @end contain a ftrace location.
1594 * That is, the instruction that is either a NOP or call to
1595 * the function tracer. It checks the ftrace internal tables to
1596 * determine if the address belongs or not.
1597 */
1598int ftrace_text_reserved(const void *start, const void *end)
1599{
1600 unsigned long ret;
1601
1602 ret = ftrace_location_range((unsigned long)start,
1603 (unsigned long)end);
1604
1605 return (int)!!ret;
1606}
1607
1608/* Test if ops registered to this rec needs regs */
1609static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1610{
1611 struct ftrace_ops *ops;
1612 bool keep_regs = false;
1613
1614 for (ops = ftrace_ops_list;
1615 ops != &ftrace_list_end; ops = ops->next) {
1616 /* pass rec in as regs to have non-NULL val */
1617 if (ftrace_ops_test(ops, rec->ip, rec)) {
1618 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1619 keep_regs = true;
1620 break;
1621 }
1622 }
1623 }
1624
1625 return keep_regs;
1626}
1627
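/*
 * Walk all dyn_ftrace records and adjust their ref counts and flag bits
 * to reflect @ops being added to (@inc) or removed from its hash.
 * Returns true if any call site now needs to be modified.
 */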
1628static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1629 int filter_hash,
1630 bool inc)
1631{
1632 struct ftrace_hash *hash;
1633 struct ftrace_hash *other_hash;
1634 struct ftrace_page *pg;
1635 struct dyn_ftrace *rec;
1636 bool update = false;
1637 int count = 0;
1638 int all = false;
1639
1640 /* Only update if the ops has been registered */
1641 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1642 return false;
1643
1644 /*
1645 * In the filter_hash case:
1646 * If the count is zero, we update all records.
1647 * Otherwise we just update the items in the hash.
1648 *
1649 * In the notrace_hash case:
1650 * We enable the update in the hash.
1651 * As disabling notrace means enabling the tracing,
	 * and enabling notrace means disabling, the inc variable
	 * gets inverted.
1654 */
1655 if (filter_hash) {
1656 hash = ops->func_hash->filter_hash;
1657 other_hash = ops->func_hash->notrace_hash;
1658 if (ftrace_hash_empty(hash))
1659 all = true;
1660 } else {
1661 inc = !inc;
1662 hash = ops->func_hash->notrace_hash;
1663 other_hash = ops->func_hash->filter_hash;
1664 /*
1665 * If the notrace hash has no items,
1666 * then there's nothing to do.
1667 */
1668 if (ftrace_hash_empty(hash))
1669 return false;
1670 }
1671
1672 do_for_each_ftrace_rec(pg, rec) {
1673 int in_other_hash = 0;
1674 int in_hash = 0;
1675 int match = 0;
1676
1677 if (rec->flags & FTRACE_FL_DISABLED)
1678 continue;
1679
1680 if (all) {
1681 /*
1682 * Only the filter_hash affects all records.
1683 * Update if the record is not in the notrace hash.
1684 */
1685 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1686 match = 1;
1687 } else {
1688 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1689 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1690
1691 /*
1692 * If filter_hash is set, we want to match all functions
1693 * that are in the hash but not in the other hash.
1694 *
1695 * If filter_hash is not set, then we are decrementing.
1696 * That means we match anything that is in the hash
1697 * and also in the other_hash. That is, we need to turn
1698 * off functions in the other hash because they are disabled
1699 * by this hash.
1700 */
1701 if (filter_hash && in_hash && !in_other_hash)
1702 match = 1;
1703 else if (!filter_hash && in_hash &&
1704 (in_other_hash || ftrace_hash_empty(other_hash)))
1705 match = 1;
1706 }
1707 if (!match)
1708 continue;
1709
1710 if (inc) {
1711 rec->flags++;
1712 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1713 return false;
1714
1715 /*
1716 * If there's only a single callback registered to a
1717 * function, and the ops has a trampoline registered
1718 * for it, then we can call it directly.
1719 */
1720 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1721 rec->flags |= FTRACE_FL_TRAMP;
1722 else
1723 /*
1724 * If we are adding another function callback
1725 * to this function, and the previous had a
1726 * custom trampoline in use, then we need to go
1727 * back to the default trampoline.
1728 */
1729 rec->flags &= ~FTRACE_FL_TRAMP;
1730
1731 /*
1732 * If any ops wants regs saved for this function
1733 * then all ops will get saved regs.
1734 */
1735 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1736 rec->flags |= FTRACE_FL_REGS;
1737 } else {
1738 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1739 return false;
1740 rec->flags--;
1741
1742 /*
1743 * If the rec had REGS enabled and the ops that is
1744 * being removed had REGS set, then see if there is
1745 * still any ops for this record that wants regs.
1746 * If not, we can stop recording them.
1747 */
1748 if (ftrace_rec_count(rec) > 0 &&
1749 rec->flags & FTRACE_FL_REGS &&
1750 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1751 if (!test_rec_ops_needs_regs(rec))
1752 rec->flags &= ~FTRACE_FL_REGS;
1753 }
1754
1755 /*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled when
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
1760 * In the future, we may set it if rec count is
1761 * decremented to one, and the ops that is left
1762 * has a trampoline.
1763 */
1764 rec->flags &= ~FTRACE_FL_TRAMP;
1765
1766 /*
1767 * flags will be cleared in ftrace_check_record()
1768 * if rec count is zero.
1769 */
1770 }
1771 count++;
1772
1773 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1774 update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
1775
1776 /* Shortcut, if we handled all records, we are done. */
1777 if (!all && count == hash->count)
1778 return update;
1779 } while_for_each_ftrace_rec();
1780
1781 return update;
1782}
1783
1784static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1785 int filter_hash)
1786{
1787 return __ftrace_hash_rec_update(ops, filter_hash, 0);
1788}
1789
1790static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1791 int filter_hash)
1792{
1793 return __ftrace_hash_rec_update(ops, filter_hash, 1);
1794}
1795
1796static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1797 int filter_hash, int inc)
1798{
1799 struct ftrace_ops *op;
1800
1801 __ftrace_hash_rec_update(ops, filter_hash, inc);
1802
1803 if (ops->func_hash != &global_ops.local_hash)
1804 return;
1805
1806 /*
1807 * If the ops shares the global_ops hash, then we need to update
1808 * all ops that are enabled and use this hash.
1809 */
1810 do_for_each_ftrace_op(op, ftrace_ops_list) {
1811 /* Already done */
1812 if (op == ops)
1813 continue;
1814 if (op->func_hash == &global_ops.local_hash)
1815 __ftrace_hash_rec_update(op, filter_hash, inc);
1816 } while_for_each_ftrace_op(op);
1817}
1818
1819static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1820 int filter_hash)
1821{
1822 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1823}
1824
1825static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1826 int filter_hash)
1827{
1828 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1829}
1830
1831/*
1832 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
1836 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1837 * - If the hash is EMPTY_HASH, it hits nothing
1838 * - Anything else hits the recs which match the hash entries.
1839 */
1840static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1841 struct ftrace_hash *old_hash,
1842 struct ftrace_hash *new_hash)
1843{
1844 struct ftrace_page *pg;
1845 struct dyn_ftrace *rec, *end = NULL;
1846 int in_old, in_new;
1847
1848 /* Only update if the ops has been registered */
1849 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1850 return 0;
1851
1852 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1853 return 0;
1854
1855 /*
1856 * Since the IPMODIFY is a very address sensitive action, we do not
1857 * allow ftrace_ops to set all functions to new hash.
1858 */
1859 if (!new_hash || !old_hash)
1860 return -EINVAL;
1861
1862 /* Update rec->flags */
1863 do_for_each_ftrace_rec(pg, rec) {
1864
1865 if (rec->flags & FTRACE_FL_DISABLED)
1866 continue;
1867
1868 /* We need to update only differences of filter_hash */
1869 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1870 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1871 if (in_old == in_new)
1872 continue;
1873
1874 if (in_new) {
1875 /* New entries must ensure no others are using it */
1876 if (rec->flags & FTRACE_FL_IPMODIFY)
1877 goto rollback;
1878 rec->flags |= FTRACE_FL_IPMODIFY;
1879 } else /* Removed entry */
1880 rec->flags &= ~FTRACE_FL_IPMODIFY;
1881 } while_for_each_ftrace_rec();
1882
1883 return 0;
1884
1885rollback:
1886 end = rec;
1887
1888 /* Roll back what we did above */
1889 do_for_each_ftrace_rec(pg, rec) {
1890
1891 if (rec->flags & FTRACE_FL_DISABLED)
1892 continue;
1893
1894 if (rec == end)
1895 goto err_out;
1896
1897 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1898 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1899 if (in_old == in_new)
1900 continue;
1901
1902 if (in_new)
1903 rec->flags &= ~FTRACE_FL_IPMODIFY;
1904 else
1905 rec->flags |= FTRACE_FL_IPMODIFY;
1906 } while_for_each_ftrace_rec();
1907
1908err_out:
1909 return -EBUSY;
1910}
1911
1912static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1913{
1914 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1915
1916 if (ftrace_hash_empty(hash))
1917 hash = NULL;
1918
1919 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1920}
1921
1922/* Disabling always succeeds */
1923static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1924{
1925 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1926
1927 if (ftrace_hash_empty(hash))
1928 hash = NULL;
1929
1930 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1931}
1932
1933static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1934 struct ftrace_hash *new_hash)
1935{
1936 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1937
1938 if (ftrace_hash_empty(old_hash))
1939 old_hash = NULL;
1940
1941 if (ftrace_hash_empty(new_hash))
1942 new_hash = NULL;
1943
1944 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1945}
1946
1947static void print_ip_ins(const char *fmt, const unsigned char *p)
1948{
1949 int i;
1950
1951 printk(KERN_CONT "%s", fmt);
1952
1953 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1954 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1955}
1956
1957static struct ftrace_ops *
1958ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1959static struct ftrace_ops *
1960ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1961
1962enum ftrace_bug_type ftrace_bug_type;
1963const void *ftrace_expected;
1964
1965static void print_bug_type(void)
1966{
1967 switch (ftrace_bug_type) {
1968 case FTRACE_BUG_UNKNOWN:
1969 break;
1970 case FTRACE_BUG_INIT:
1971 pr_info("Initializing ftrace call sites\n");
1972 break;
1973 case FTRACE_BUG_NOP:
1974 pr_info("Setting ftrace call site to NOP\n");
1975 break;
1976 case FTRACE_BUG_CALL:
1977 pr_info("Setting ftrace call site to call ftrace function\n");
1978 break;
1979 case FTRACE_BUG_UPDATE:
1980 pr_info("Updating ftrace call site to call a different ftrace function\n");
1981 break;
1982 }
1983}
1984
1985/**
1986 * ftrace_bug - report and shutdown function tracer
1987 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1988 * @rec: The record that failed
1989 *
1990 * The arch code that enables or disables the function tracing
1991 * can call ftrace_bug() when it has detected a problem in
1992 * modifying the code. @failed should be one of either:
1993 * EFAULT - if the problem happens on reading the @ip address
1994 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
1996 */
1997void ftrace_bug(int failed, struct dyn_ftrace *rec)
1998{
1999 unsigned long ip = rec ? rec->ip : 0;
2000
2001 switch (failed) {
2002 case -EFAULT:
2003 FTRACE_WARN_ON_ONCE(1);
2004 pr_info("ftrace faulted on modifying ");
2005 print_ip_sym(ip);
2006 break;
2007 case -EINVAL:
2008 FTRACE_WARN_ON_ONCE(1);
2009 pr_info("ftrace failed to modify ");
2010 print_ip_sym(ip);
2011 print_ip_ins(" actual: ", (unsigned char *)ip);
2012 pr_cont("\n");
2013 if (ftrace_expected) {
2014 print_ip_ins(" expected: ", ftrace_expected);
2015 pr_cont("\n");
2016 }
2017 break;
2018 case -EPERM:
2019 FTRACE_WARN_ON_ONCE(1);
2020 pr_info("ftrace faulted on writing ");
2021 print_ip_sym(ip);
2022 break;
2023 default:
2024 FTRACE_WARN_ON_ONCE(1);
2025 pr_info("ftrace faulted on unknown error ");
2026 print_ip_sym(ip);
2027 }
2028 print_bug_type();
2029 if (rec) {
2030 struct ftrace_ops *ops = NULL;
2031
2032 pr_info("ftrace record flags: %lx\n", rec->flags);
2033 pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2034 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2035 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2036 ops = ftrace_find_tramp_ops_any(rec);
2037 if (ops) {
2038 do {
2039 pr_cont("\ttramp: %pS (%pS)",
2040 (void *)ops->trampoline,
2041 (void *)ops->func);
2042 ops = ftrace_find_tramp_ops_next(rec, ops);
2043 } while (ops);
2044 } else
2045 pr_cont("\ttramp: ERROR!");
2046
2047 }
2048 ip = ftrace_get_addr_curr(rec);
2049 pr_cont("\n expected tramp: %lx\n", ip);
2050 }
2051}
2052
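/*
 * Decide what should happen to a call site (make call, make nop, modify
 * or ignore) based on its flag bits, and update those bits when @update
 * is set.
 */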
2053static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2054{
2055 unsigned long flag = 0UL;
2056
2057 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2058
2059 if (rec->flags & FTRACE_FL_DISABLED)
2060 return FTRACE_UPDATE_IGNORE;
2061
2062 /*
2063 * If we are updating calls:
2064 *
2065 * If the record has a ref count, then we need to enable it
2066 * because someone is using it.
2067 *
2068 * Otherwise we make sure its disabled.
2069 *
2070 * If we are disabling calls, then disable all records that
2071 * are enabled.
2072 */
2073 if (enable && ftrace_rec_count(rec))
2074 flag = FTRACE_FL_ENABLED;
2075
2076 /*
2077 * If enabling and the REGS flag does not match the REGS_EN, or
2078 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2079 * this record. Set flags to fail the compare against ENABLED.
2080 */
2081 if (flag) {
2082 if (!(rec->flags & FTRACE_FL_REGS) !=
2083 !(rec->flags & FTRACE_FL_REGS_EN))
2084 flag |= FTRACE_FL_REGS;
2085
2086 if (!(rec->flags & FTRACE_FL_TRAMP) !=
2087 !(rec->flags & FTRACE_FL_TRAMP_EN))
2088 flag |= FTRACE_FL_TRAMP;
2089 }
2090
2091 /* If the state of this record hasn't changed, then do nothing */
2092 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2093 return FTRACE_UPDATE_IGNORE;
2094
2095 if (flag) {
2096 /* Save off if rec is being enabled (for return value) */
2097 flag ^= rec->flags & FTRACE_FL_ENABLED;
2098
2099 if (update) {
2100 rec->flags |= FTRACE_FL_ENABLED;
2101 if (flag & FTRACE_FL_REGS) {
2102 if (rec->flags & FTRACE_FL_REGS)
2103 rec->flags |= FTRACE_FL_REGS_EN;
2104 else
2105 rec->flags &= ~FTRACE_FL_REGS_EN;
2106 }
2107 if (flag & FTRACE_FL_TRAMP) {
2108 if (rec->flags & FTRACE_FL_TRAMP)
2109 rec->flags |= FTRACE_FL_TRAMP_EN;
2110 else
2111 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2112 }
2113 }
2114
2115 /*
2116 * If this record is being updated from a nop, then
2117 * return UPDATE_MAKE_CALL.
2118 * Otherwise,
2119 * return UPDATE_MODIFY_CALL to tell the caller to convert
2120		 * from a save-regs function to a non-save-regs function,
2121		 * or vice versa, or from a trampoline call.
2122 */
2123 if (flag & FTRACE_FL_ENABLED) {
2124 ftrace_bug_type = FTRACE_BUG_CALL;
2125 return FTRACE_UPDATE_MAKE_CALL;
2126 }
2127
2128 ftrace_bug_type = FTRACE_BUG_UPDATE;
2129 return FTRACE_UPDATE_MODIFY_CALL;
2130 }
2131
2132 if (update) {
2133 /* If there's no more users, clear all flags */
2134 if (!ftrace_rec_count(rec))
2135 rec->flags = 0;
2136 else
2137 /*
2138 * Just disable the record, but keep the ops TRAMP
2139 * and REGS states. The _EN flags must be disabled though.
2140 */
2141 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2142 FTRACE_FL_REGS_EN);
2143 }
2144
2145 ftrace_bug_type = FTRACE_BUG_NOP;
2146 return FTRACE_UPDATE_MAKE_NOP;
2147}
2148
2149/**
2150 * ftrace_update_record - update whether a record is traced or not
2151 * @rec: the record to update
2152 * @enable: set to 1 if the record should be traced, zero to force disable
2153 *
2154 * The records that represent all functions that can be traced need
2155 * to be updated when tracing has been enabled.
2156 */
2157int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2158{
2159 return ftrace_check_record(rec, enable, 1);
2160}
2161
2162/**
2163 * ftrace_test_record - check if the record has been enabled or not
2164 * @rec: the record to test
2165 * @enable: set to 1 to check if enabled, 0 to check if it is disabled
2166 *
2167 * The arch code may need to test if a record is already set to
2168 * tracing to determine how to modify the function code that it
2169 * represents.
2170 */
2171int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2172{
2173 return ftrace_check_record(rec, enable, 0);
2174}
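
/*
 * Illustrative sketch (not taken from any particular architecture):
 * arch code that patches records itself typically asks
 * ftrace_test_record() which transition is pending before touching the
 * text, and commits it with ftrace_update_record() once the
 * modification has succeeded:
 *
 *	switch (ftrace_test_record(rec, enable)) {
 *	case FTRACE_UPDATE_IGNORE:
 *		return 0;
 *	case FTRACE_UPDATE_MAKE_CALL:
 *		// patch the site from nop to call
 *		break;
 *	case FTRACE_UPDATE_MAKE_NOP:
 *		// patch the site from call to nop
 *		break;
 *	case FTRACE_UPDATE_MODIFY_CALL:
 *		// change one call destination to another
 *		break;
 *	}
 */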
2175
2176static struct ftrace_ops *
2177ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2178{
2179 struct ftrace_ops *op;
2180 unsigned long ip = rec->ip;
2181
2182 do_for_each_ftrace_op(op, ftrace_ops_list) {
2183
2184 if (!op->trampoline)
2185 continue;
2186
2187 if (hash_contains_ip(ip, op->func_hash))
2188 return op;
2189 } while_for_each_ftrace_op(op);
2190
2191 return NULL;
2192}
2193
2194static struct ftrace_ops *
2195ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2196 struct ftrace_ops *op)
2197{
2198 unsigned long ip = rec->ip;
2199
2200 while_for_each_ftrace_op(op) {
2201
2202 if (!op->trampoline)
2203 continue;
2204
2205 if (hash_contains_ip(ip, op->func_hash))
2206 return op;
2207 }
2208
2209 return NULL;
2210}
2211
2212static struct ftrace_ops *
2213ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2214{
2215 struct ftrace_ops *op;
2216 unsigned long ip = rec->ip;
2217
2218 /*
2219 * Need to check removed ops first.
2220 * If they are being removed, and this rec has a tramp,
2221 * and this rec is in the ops list, then it would be the
2222 * one with the tramp.
2223 */
2224 if (removed_ops) {
2225 if (hash_contains_ip(ip, &removed_ops->old_hash))
2226 return removed_ops;
2227 }
2228
2229 /*
2230 * Need to find the current trampoline for a rec.
2231 * Now, a trampoline is only attached to a rec if there
2232 * was a single 'ops' attached to it. But this can be called
2233 * when we are adding another op to the rec or removing the
2234 * current one. Thus, if the op is being added, we can
2235 * ignore it because it hasn't attached itself to the rec
2236 * yet.
2237 *
2238 * If an ops is being modified (hooking to different functions)
2239 * then we don't care about the new functions that are being
2240 * added, just the old ones (that are probably being removed).
2241 *
2242	 * If we are adding an ops to a function that is already using
2243	 * a trampoline, that trampoline needs to be removed (trampolines
2244	 * are only for a single ops attached), so an ops that is not
2245	 * being modified also needs to be checked.
2246 */
2247 do_for_each_ftrace_op(op, ftrace_ops_list) {
2248
2249 if (!op->trampoline)
2250 continue;
2251
2252 /*
2253 * If the ops is being added, it hasn't gotten to
2254 * the point to be removed from this tree yet.
2255 */
2256 if (op->flags & FTRACE_OPS_FL_ADDING)
2257 continue;
2258
2259
2260 /*
2261 * If the ops is being modified and is in the old
2262 * hash, then it is probably being removed from this
2263 * function.
2264 */
2265 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2266 hash_contains_ip(ip, &op->old_hash))
2267 return op;
2268 /*
2269 * If the ops is not being added or modified, and it's
2270 * in its normal filter hash, then this must be the one
2271 * we want!
2272 */
2273 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2274 hash_contains_ip(ip, op->func_hash))
2275 return op;
2276
2277 } while_for_each_ftrace_op(op);
2278
2279 return NULL;
2280}
2281
2282static struct ftrace_ops *
2283ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2284{
2285 struct ftrace_ops *op;
2286 unsigned long ip = rec->ip;
2287
2288 do_for_each_ftrace_op(op, ftrace_ops_list) {
2289 /* pass rec in as regs to have non-NULL val */
2290 if (hash_contains_ip(ip, op->func_hash))
2291 return op;
2292 } while_for_each_ftrace_op(op);
2293
2294 return NULL;
2295}
2296
2297/**
2298 * ftrace_get_addr_new - Get the call address to set to
2299 * @rec: The ftrace record descriptor
2300 *
2301 * If the record has the FTRACE_FL_REGS set, that means that it
2302 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2303 * is not set, then it wants to convert to the normal callback.
2304 *
2305 * Returns the address of the trampoline to set to
2306 */
2307unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2308{
2309 struct ftrace_ops *ops;
2310
2311 /* Trampolines take precedence over regs */
2312 if (rec->flags & FTRACE_FL_TRAMP) {
2313 ops = ftrace_find_tramp_ops_new(rec);
2314 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2315 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2316 (void *)rec->ip, (void *)rec->ip, rec->flags);
2317 /* Ftrace is shutting down, return anything */
2318 return (unsigned long)FTRACE_ADDR;
2319 }
2320 return ops->trampoline;
2321 }
2322
2323 if (rec->flags & FTRACE_FL_REGS)
2324 return (unsigned long)FTRACE_REGS_ADDR;
2325 else
2326 return (unsigned long)FTRACE_ADDR;
2327}
2328
2329/**
2330 * ftrace_get_addr_curr - Get the call address that is already there
2331 * @rec: The ftrace record descriptor
2332 *
2333 * The FTRACE_FL_REGS_EN is set when the record already points to
2334 * a function that saves all the regs. Basically the '_EN' version
2335 * represents the current state of the function.
2336 *
2337 * Returns the address of the trampoline that is currently being called
2338 */
2339unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2340{
2341 struct ftrace_ops *ops;
2342
2343 /* Trampolines take precedence over regs */
2344 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2345 ops = ftrace_find_tramp_ops_curr(rec);
2346 if (FTRACE_WARN_ON(!ops)) {
2347 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2348 (void *)rec->ip, (void *)rec->ip);
2349 /* Ftrace is shutting down, return anything */
2350 return (unsigned long)FTRACE_ADDR;
2351 }
2352 return ops->trampoline;
2353 }
2354
2355 if (rec->flags & FTRACE_FL_REGS_EN)
2356 return (unsigned long)FTRACE_REGS_ADDR;
2357 else
2358 return (unsigned long)FTRACE_ADDR;
2359}
2360
2361static int
2362__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2363{
2364 unsigned long ftrace_old_addr;
2365 unsigned long ftrace_addr;
2366 int ret;
2367
2368 ftrace_addr = ftrace_get_addr_new(rec);
2369
2370 /* This needs to be done before we call ftrace_update_record */
2371 ftrace_old_addr = ftrace_get_addr_curr(rec);
2372
2373 ret = ftrace_update_record(rec, enable);
2374
2375 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2376
2377 switch (ret) {
2378 case FTRACE_UPDATE_IGNORE:
2379 return 0;
2380
2381 case FTRACE_UPDATE_MAKE_CALL:
2382 ftrace_bug_type = FTRACE_BUG_CALL;
2383 return ftrace_make_call(rec, ftrace_addr);
2384
2385 case FTRACE_UPDATE_MAKE_NOP:
2386 ftrace_bug_type = FTRACE_BUG_NOP;
2387 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2388
2389 case FTRACE_UPDATE_MODIFY_CALL:
2390 ftrace_bug_type = FTRACE_BUG_UPDATE;
2391 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2392 }
2393
2394	return -1; /* unknown ftrace bug */
2395}
2396
2397void __weak ftrace_replace_code(int mod_flags)
2398{
2399 struct dyn_ftrace *rec;
2400 struct ftrace_page *pg;
2401 int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2402 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2403 int failed;
2404
2405 if (unlikely(ftrace_disabled))
2406 return;
2407
2408 do_for_each_ftrace_rec(pg, rec) {
2409
2410 if (rec->flags & FTRACE_FL_DISABLED)
2411 continue;
2412
2413 failed = __ftrace_replace_code(rec, enable);
2414 if (failed) {
2415 ftrace_bug(failed, rec);
2416 /* Stop processing */
2417 return;
2418 }
2419 if (schedulable)
2420 cond_resched();
2421 } while_for_each_ftrace_rec();
2422}
2423
2424struct ftrace_rec_iter {
2425 struct ftrace_page *pg;
2426 int index;
2427};
2428
2429/**
2430 * ftrace_rec_iter_start - start up iterating over traced functions
2431 *
2432 * Returns an iterator handle that is used to iterate over all
2433 * the records that represent address locations where functions
2434 * are traced.
2435 *
2436 * May return NULL if no records are available.
2437 */
2438struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2439{
2440 /*
2441 * We only use a single iterator.
2442 * Protected by the ftrace_lock mutex.
2443 */
2444 static struct ftrace_rec_iter ftrace_rec_iter;
2445 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2446
2447 iter->pg = ftrace_pages_start;
2448 iter->index = 0;
2449
2450 /* Could have empty pages */
2451 while (iter->pg && !iter->pg->index)
2452 iter->pg = iter->pg->next;
2453
2454 if (!iter->pg)
2455 return NULL;
2456
2457 return iter;
2458}
2459
2460/**
2461 * ftrace_rec_iter_next - get the next record to process.
2462 * @iter: The handle to the iterator.
2463 *
2464 * Returns the next iterator after the given iterator @iter.
2465 */
2466struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2467{
2468 iter->index++;
2469
2470 if (iter->index >= iter->pg->index) {
2471 iter->pg = iter->pg->next;
2472 iter->index = 0;
2473
2474 /* Could have empty pages */
2475 while (iter->pg && !iter->pg->index)
2476 iter->pg = iter->pg->next;
2477 }
2478
2479 if (!iter->pg)
2480 return NULL;
2481
2482 return iter;
2483}
2484
2485/**
2486 * ftrace_rec_iter_record - get the record at the iterator location
2487 * @iter: The current iterator location
2488 *
2489 * Returns the record that the current @iter is at.
2490 */
2491struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2492{
2493 return &iter->pg->records[iter->index];
2494}
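
/*
 * Illustrative usage sketch: arch code that converts records itself
 * (for instance in its own ftrace_replace_code()) can walk every record
 * with the iterator above:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// operate on rec->ip
 *	}
 *
 * The iterator is a single static instance protected by ftrace_lock
 * (see ftrace_rec_iter_start()), so the walk must run under that mutex.
 */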
2495
2496static int
2497ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2498{
2499 int ret;
2500
2501 if (unlikely(ftrace_disabled))
2502 return 0;
2503
2504 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2505 if (ret) {
2506 ftrace_bug_type = FTRACE_BUG_INIT;
2507 ftrace_bug(ret, rec);
2508 return 0;
2509 }
2510 return 1;
2511}
2512
2513/*
2514 * archs can override this function if they must do something
2515 * before the modifying code is performed.
2516 */
2517int __weak ftrace_arch_code_modify_prepare(void)
2518{
2519 return 0;
2520}
2521
2522/*
2523 * archs can override this function if they must do something
2524 * after the modifying code is performed.
2525 */
2526int __weak ftrace_arch_code_modify_post_process(void)
2527{
2528 return 0;
2529}
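
/*
 * Hedged sketch of an arch override (the use of text_mutex here is only
 * illustrative, not a statement about any particular architecture): an
 * arch that needs extra serialization around text patching could pair
 * the two hooks like this:
 *
 *	int ftrace_arch_code_modify_prepare(void)
 *	{
 *		mutex_lock(&text_mutex);
 *		return 0;
 *	}
 *
 *	int ftrace_arch_code_modify_post_process(void)
 *	{
 *		mutex_unlock(&text_mutex);
 *		return 0;
 *	}
 */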
2530
2531void ftrace_modify_all_code(int command)
2532{
2533 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2534 int mod_flags = 0;
2535 int err = 0;
2536
2537 if (command & FTRACE_MAY_SLEEP)
2538 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2539
2540 /*
2541	 * If the ftrace_caller calls an ftrace_ops func directly,
2542	 * we need to make sure that it only traces functions it
2543	 * expects to trace. When doing the switch of functions,
2544	 * we need to update to the ftrace_ops_list_func first,
2545	 * before the transition between old and new calls is set,
2546	 * as the ftrace_ops_list_func will check the ops hashes
2547	 * to make sure the ops have the right functions
2548	 * traced.
2549 */
2550 if (update) {
2551 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2552 if (FTRACE_WARN_ON(err))
2553 return;
2554 }
2555
2556 if (command & FTRACE_UPDATE_CALLS)
2557 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2558 else if (command & FTRACE_DISABLE_CALLS)
2559 ftrace_replace_code(mod_flags);
2560
2561 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2562 function_trace_op = set_function_trace_op;
2563 smp_wmb();
2564 /* If irqs are disabled, we are in stop machine */
2565 if (!irqs_disabled())
2566 smp_call_function(ftrace_sync_ipi, NULL, 1);
2567 err = ftrace_update_ftrace_func(ftrace_trace_function);
2568 if (FTRACE_WARN_ON(err))
2569 return;
2570 }
2571
2572 if (command & FTRACE_START_FUNC_RET)
2573 err = ftrace_enable_ftrace_graph_caller();
2574 else if (command & FTRACE_STOP_FUNC_RET)
2575 err = ftrace_disable_ftrace_graph_caller();
2576 FTRACE_WARN_ON(err);
2577}
2578
2579static int __ftrace_modify_code(void *data)
2580{
2581 int *command = data;
2582
2583 ftrace_modify_all_code(*command);
2584
2585 return 0;
2586}
2587
2588/**
2589 * ftrace_run_stop_machine - go back to the stop machine method
2590 * @command: The command to tell ftrace what to do
2591 *
2592 * If an arch needs to fall back to the stop machine method, then
2593 * it can call this function.
2594 */
2595void ftrace_run_stop_machine(int command)
2596{
2597 stop_machine(__ftrace_modify_code, &command, NULL);
2598}
2599
2600/**
2601 * arch_ftrace_update_code - modify the code to trace or not trace
2602 * @command: The command that needs to be done
2603 *
2604 * Archs can override this function if they do not need to
2605 * run stop_machine() to modify code.
2606 */
2607void __weak arch_ftrace_update_code(int command)
2608{
2609 ftrace_run_stop_machine(command);
2610}
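
/*
 * Illustrative sketch, assuming an architecture whose text poking is
 * already safe against concurrent execution: such an arch can override
 * the weak function above and skip stop_machine() entirely by calling
 * the generic worker directly:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 */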
2611
2612static void ftrace_run_update_code(int command)
2613{
2614 int ret;
2615
2616 ret = ftrace_arch_code_modify_prepare();
2617 FTRACE_WARN_ON(ret);
2618 if (ret)
2619 return;
2620
2621 /*
2622 * By default we use stop_machine() to modify the code.
2623	 * But archs can do whatever they want as long as it
2624	 * is safe. stop_machine() is the safest, but also
2625 * produces the most overhead.
2626 */
2627 arch_ftrace_update_code(command);
2628
2629 ret = ftrace_arch_code_modify_post_process();
2630 FTRACE_WARN_ON(ret);
2631}
2632
2633static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2634 struct ftrace_ops_hash *old_hash)
2635{
2636 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2637 ops->old_hash.filter_hash = old_hash->filter_hash;
2638 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2639 ftrace_run_update_code(command);
2640 ops->old_hash.filter_hash = NULL;
2641 ops->old_hash.notrace_hash = NULL;
2642 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2643}
2644
2645static ftrace_func_t saved_ftrace_func;
2646static int ftrace_start_up;
2647
2648void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2649{
2650}
2651
2652static void ftrace_startup_enable(int command)
2653{
2654 if (saved_ftrace_func != ftrace_trace_function) {
2655 saved_ftrace_func = ftrace_trace_function;
2656 command |= FTRACE_UPDATE_TRACE_FUNC;
2657 }
2658
2659 if (!command || !ftrace_enabled)
2660 return;
2661
2662 ftrace_run_update_code(command);
2663}
2664
2665static void ftrace_startup_all(int command)
2666{
2667 update_all_ops = true;
2668 ftrace_startup_enable(command);
2669 update_all_ops = false;
2670}
2671
2672int ftrace_startup(struct ftrace_ops *ops, int command)
2673{
2674 int ret;
2675
2676 if (unlikely(ftrace_disabled))
2677 return -ENODEV;
2678
2679 ret = __register_ftrace_function(ops);
2680 if (ret)
2681 return ret;
2682
2683 ftrace_start_up++;
2684
2685 /*
2686	 * Note that ftrace probes use this to start up
2687 * and modify functions it will probe. But we still
2688 * set the ADDING flag for modification, as probes
2689 * do not have trampolines. If they add them in the
2690 * future, then the probes will need to distinguish
2691 * between adding and updating probes.
2692 */
2693 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2694
2695 ret = ftrace_hash_ipmodify_enable(ops);
2696 if (ret < 0) {
2697 /* Rollback registration process */
2698 __unregister_ftrace_function(ops);
2699 ftrace_start_up--;
2700 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2701 return ret;
2702 }
2703
2704 if (ftrace_hash_rec_enable(ops, 1))
2705 command |= FTRACE_UPDATE_CALLS;
2706
2707 ftrace_startup_enable(command);
2708
2709 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2710
2711 return 0;
2712}
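
/*
 * Minimal usage sketch (my_callback/my_ops are made-up names): callers
 * normally do not invoke ftrace_startup()/ftrace_shutdown() directly.
 * They register an ftrace_ops, and the register/unregister paths end up
 * here:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called for every function this ops traces
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);	// leads to ftrace_startup()
 *	...
 *	unregister_ftrace_function(&my_ops);	// leads to ftrace_shutdown()
 */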
2713
2714int ftrace_shutdown(struct ftrace_ops *ops, int command)
2715{
2716 int ret;
2717
2718 if (unlikely(ftrace_disabled))
2719 return -ENODEV;
2720
2721 ret = __unregister_ftrace_function(ops);
2722 if (ret)
2723 return ret;
2724
2725 ftrace_start_up--;
2726 /*
2727 * Just warn in case of unbalance, no need to kill ftrace, it's not
2728	 * critical but the ftrace_call callers may never be nopped again after
2729 * further ftrace uses.
2730 */
2731 WARN_ON_ONCE(ftrace_start_up < 0);
2732
2733 /* Disabling ipmodify never fails */
2734 ftrace_hash_ipmodify_disable(ops);
2735
2736 if (ftrace_hash_rec_disable(ops, 1))
2737 command |= FTRACE_UPDATE_CALLS;
2738
2739 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2740
2741 if (saved_ftrace_func != ftrace_trace_function) {
2742 saved_ftrace_func = ftrace_trace_function;
2743 command |= FTRACE_UPDATE_TRACE_FUNC;
2744 }
2745
2746 if (!command || !ftrace_enabled) {
2747 /*
2748 * If these are dynamic or per_cpu ops, they still
2749		 * need their data freed. Since function tracing is
2750 * not currently active, we can just free them
2751 * without synchronizing all CPUs.
2752 */
2753 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2754 goto free_ops;
2755
2756 return 0;
2757 }
2758
2759 /*
2760 * If the ops uses a trampoline, then it needs to be
2761 * tested first on update.
2762 */
2763 ops->flags |= FTRACE_OPS_FL_REMOVING;
2764 removed_ops = ops;
2765
2766 /* The trampoline logic checks the old hashes */
2767 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2768 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2769
2770 ftrace_run_update_code(command);
2771
2772 /*
2773 * If there's no more ops registered with ftrace, run a
2774 * sanity check to make sure all rec flags are cleared.
2775 */
2776 if (rcu_dereference_protected(ftrace_ops_list,
2777 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2778 struct ftrace_page *pg;
2779 struct dyn_ftrace *rec;
2780
2781 do_for_each_ftrace_rec(pg, rec) {
2782 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2783 pr_warn(" %pS flags:%lx\n",
2784 (void *)rec->ip, rec->flags);
2785 } while_for_each_ftrace_rec();
2786 }
2787
2788 ops->old_hash.filter_hash = NULL;
2789 ops->old_hash.notrace_hash = NULL;
2790
2791 removed_ops = NULL;
2792 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2793
2794 /*
2795 * Dynamic ops may be freed, we must make sure that all
2796 * callers are done before leaving this function.
2797 * The same goes for freeing the per_cpu data of the per_cpu
2798 * ops.
2799 */
2800 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
2801 /*
2802 * We need to do a hard force of sched synchronization.
2803 * This is because we use preempt_disable() to do RCU, but
2804 * the function tracers can be called where RCU is not watching
2805 * (like before user_exit()). We can not rely on the RCU
2806 * infrastructure to do the synchronization, thus we must do it
2807 * ourselves.
2808 */
2809 schedule_on_each_cpu(ftrace_sync);
2810
2811 /*
2812		 * When the kernel is preemptive, tasks can be preempted
2813		 * while on an ftrace trampoline. Just scheduling a task on
2814		 * a CPU is not good enough to flush them. Calling
2815		 * synchronize_rcu_tasks() will wait for those tasks to
2816 * execute and either schedule voluntarily or enter user space.
2817 */
2818 if (IS_ENABLED(CONFIG_PREEMPT))
2819 synchronize_rcu_tasks();
2820
2821 free_ops:
2822 arch_ftrace_trampoline_free(ops);
2823 }
2824
2825 return 0;
2826}
2827
2828static void ftrace_startup_sysctl(void)
2829{
2830 int command;
2831
2832 if (unlikely(ftrace_disabled))
2833 return;
2834
2835 /* Force update next time */
2836 saved_ftrace_func = NULL;
2837 /* ftrace_start_up is true if we want ftrace running */
2838 if (ftrace_start_up) {
2839 command = FTRACE_UPDATE_CALLS;
2840 if (ftrace_graph_active)
2841 command |= FTRACE_START_FUNC_RET;
2842 ftrace_startup_enable(command);
2843 }
2844}
2845
2846static void ftrace_shutdown_sysctl(void)
2847{
2848 int command;
2849
2850 if (unlikely(ftrace_disabled))
2851 return;
2852
2853 /* ftrace_start_up is true if ftrace is running */
2854 if (ftrace_start_up) {
2855 command = FTRACE_DISABLE_CALLS;
2856 if (ftrace_graph_active)
2857 command |= FTRACE_STOP_FUNC_RET;
2858 ftrace_run_update_code(command);
2859 }
2860}
2861
2862static u64 ftrace_update_time;
2863unsigned long ftrace_update_tot_cnt;
2864
2865static inline int ops_traces_mod(struct ftrace_ops *ops)
2866{
2867 /*
2868	 * An empty filter_hash defaults to tracing all of the module's functions.
2869	 * But the notrace hash requires testing individual module functions.
2870 */
2871 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2872 ftrace_hash_empty(ops->func_hash->notrace_hash);
2873}
2874
2875/*
2876 * Check if the current ops references the record.
2877 *
2878 * If the ops traces all functions, then it was already accounted for.
2879 * If the ops does not trace the current record function, skip it.
2880 * If the ops ignores the function via notrace filter, skip it.
2881 */
2882static inline bool
2883ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2884{
2885 /* If ops isn't enabled, ignore it */
2886 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2887 return false;
2888
2889 /* If ops traces all then it includes this function */
2890 if (ops_traces_mod(ops))
2891 return true;
2892
2893 /* The function must be in the filter */
2894 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2895 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2896 return false;
2897
2898 /* If in notrace hash, we ignore it too */
2899 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2900 return false;
2901
2902 return true;
2903}
2904
2905static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2906{
2907 struct ftrace_page *pg;
2908 struct dyn_ftrace *p;
2909 u64 start, stop;
2910 unsigned long update_cnt = 0;
2911 unsigned long rec_flags = 0;
2912 int i;
2913
2914 start = ftrace_now(raw_smp_processor_id());
2915
2916 /*
2917 * When a module is loaded, this function is called to convert
2918 * the calls to mcount in its text to nops, and also to create
2919 * an entry in the ftrace data. Now, if ftrace is activated
2920	 * after this call, but before the module sets its text to
2921	 * read-only, then enabling ftrace can fail to modify the code
2922	 * if the text is made read-only while ftrace is converting the calls.
2923 * To prevent this, the module's records are set as disabled
2924 * and will be enabled after the call to set the module's text
2925 * to read-only.
2926 */
2927 if (mod)
2928 rec_flags |= FTRACE_FL_DISABLED;
2929
2930 for (pg = new_pgs; pg; pg = pg->next) {
2931
2932 for (i = 0; i < pg->index; i++) {
2933
2934 /* If something went wrong, bail without enabling anything */
2935 if (unlikely(ftrace_disabled))
2936 return -1;
2937
2938 p = &pg->records[i];
2939 p->flags = rec_flags;
2940
2941#ifndef CC_USING_NOP_MCOUNT
2942 /*
2943 * Do the initial record conversion from mcount jump
2944 * to the NOP instructions.
2945 */
2946 if (!ftrace_code_disable(mod, p))
2947 break;
2948#endif
2949
2950 update_cnt++;
2951 }
2952 }
2953
2954 stop = ftrace_now(raw_smp_processor_id());
2955 ftrace_update_time = stop - start;
2956 ftrace_update_tot_cnt += update_cnt;
2957
2958 return 0;
2959}
2960
2961static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2962{
2963 int order;
2964 int cnt;
2965
2966 if (WARN_ON(!count))
2967 return -EINVAL;
2968
2969 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2970
2971 /*
2972 * We want to fill as much as possible. No more than a page
2973 * may be empty.
2974 */
2975 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2976 order--;
2977
2978 again:
2979 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2980
2981 if (!pg->records) {
2982 /* if we can't allocate this size, try something smaller */
2983 if (!order)
2984 return -ENOMEM;
2985 order >>= 1;
2986 goto again;
2987 }
2988
2989 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2990 pg->size = cnt;
2991
2992 if (cnt > count)
2993 cnt = count;
2994
2995 return cnt;
2996}
2997
2998static struct ftrace_page *
2999ftrace_allocate_pages(unsigned long num_to_init)
3000{
3001 struct ftrace_page *start_pg;
3002 struct ftrace_page *pg;
3003 int order;
3004 int cnt;
3005
3006 if (!num_to_init)
3007		return NULL;
3008
3009 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3010 if (!pg)
3011 return NULL;
3012
3013 /*
3014	 * Try to allocate as much as possible in one contiguous
3015 * location that fills in all of the space. We want to
3016 * waste as little space as possible.
3017 */
3018 for (;;) {
3019 cnt = ftrace_allocate_records(pg, num_to_init);
3020 if (cnt < 0)
3021 goto free_pages;
3022
3023 num_to_init -= cnt;
3024 if (!num_to_init)
3025 break;
3026
3027 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3028 if (!pg->next)
3029 goto free_pages;
3030
3031 pg = pg->next;
3032 }
3033
3034 return start_pg;
3035
3036 free_pages:
3037 pg = start_pg;
3038 while (pg) {
3039 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3040 free_pages((unsigned long)pg->records, order);
3041 start_pg = pg->next;
3042 kfree(pg);
3043 pg = start_pg;
3044 }
3045 pr_info("ftrace: FAILED to allocate memory for functions\n");
3046 return NULL;
3047}
3048
3049#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3050
3051struct ftrace_iterator {
3052 loff_t pos;
3053 loff_t func_pos;
3054 loff_t mod_pos;
3055 struct ftrace_page *pg;
3056 struct dyn_ftrace *func;
3057 struct ftrace_func_probe *probe;
3058 struct ftrace_func_entry *probe_entry;
3059 struct trace_parser parser;
3060 struct ftrace_hash *hash;
3061 struct ftrace_ops *ops;
3062 struct trace_array *tr;
3063 struct list_head *mod_list;
3064 int pidx;
3065 int idx;
3066 unsigned flags;
3067};
3068
3069static void *
3070t_probe_next(struct seq_file *m, loff_t *pos)
3071{
3072 struct ftrace_iterator *iter = m->private;
3073 struct trace_array *tr = iter->ops->private;
3074 struct list_head *func_probes;
3075 struct ftrace_hash *hash;
3076 struct list_head *next;
3077 struct hlist_node *hnd = NULL;
3078 struct hlist_head *hhd;
3079 int size;
3080
3081 (*pos)++;
3082 iter->pos = *pos;
3083
3084 if (!tr)
3085 return NULL;
3086
3087 func_probes = &tr->func_probes;
3088 if (list_empty(func_probes))
3089 return NULL;
3090
3091 if (!iter->probe) {
3092 next = func_probes->next;
3093 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3094 }
3095
3096 if (iter->probe_entry)
3097 hnd = &iter->probe_entry->hlist;
3098
3099 hash = iter->probe->ops.func_hash->filter_hash;
3100 size = 1 << hash->size_bits;
3101
3102 retry:
3103 if (iter->pidx >= size) {
3104 if (iter->probe->list.next == func_probes)
3105 return NULL;
3106 next = iter->probe->list.next;
3107 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3108 hash = iter->probe->ops.func_hash->filter_hash;
3109 size = 1 << hash->size_bits;
3110 iter->pidx = 0;
3111 }
3112
3113 hhd = &hash->buckets[iter->pidx];
3114
3115 if (hlist_empty(hhd)) {
3116 iter->pidx++;
3117 hnd = NULL;
3118 goto retry;
3119 }
3120
3121 if (!hnd)
3122 hnd = hhd->first;
3123 else {
3124 hnd = hnd->next;
3125 if (!hnd) {
3126 iter->pidx++;
3127 goto retry;
3128 }
3129 }
3130
3131 if (WARN_ON_ONCE(!hnd))
3132 return NULL;
3133
3134 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3135
3136 return iter;
3137}
3138
3139static void *t_probe_start(struct seq_file *m, loff_t *pos)
3140{
3141 struct ftrace_iterator *iter = m->private;
3142 void *p = NULL;
3143 loff_t l;
3144
3145 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3146 return NULL;
3147
3148 if (iter->mod_pos > *pos)
3149 return NULL;
3150
3151 iter->probe = NULL;
3152 iter->probe_entry = NULL;
3153 iter->pidx = 0;
3154 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3155 p = t_probe_next(m, &l);
3156 if (!p)
3157 break;
3158 }
3159 if (!p)
3160 return NULL;
3161
3162 /* Only set this if we have an item */
3163 iter->flags |= FTRACE_ITER_PROBE;
3164
3165 return iter;
3166}
3167
3168static int
3169t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3170{
3171 struct ftrace_func_entry *probe_entry;
3172 struct ftrace_probe_ops *probe_ops;
3173 struct ftrace_func_probe *probe;
3174
3175 probe = iter->probe;
3176 probe_entry = iter->probe_entry;
3177
3178 if (WARN_ON_ONCE(!probe || !probe_entry))
3179 return -EIO;
3180
3181 probe_ops = probe->probe_ops;
3182
3183 if (probe_ops->print)
3184 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3185
3186 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3187 (void *)probe_ops->func);
3188
3189 return 0;
3190}
3191
3192static void *
3193t_mod_next(struct seq_file *m, loff_t *pos)
3194{
3195 struct ftrace_iterator *iter = m->private;
3196 struct trace_array *tr = iter->tr;
3197
3198 (*pos)++;
3199 iter->pos = *pos;
3200
3201 iter->mod_list = iter->mod_list->next;
3202
3203 if (iter->mod_list == &tr->mod_trace ||
3204 iter->mod_list == &tr->mod_notrace) {
3205 iter->flags &= ~FTRACE_ITER_MOD;
3206 return NULL;
3207 }
3208
3209 iter->mod_pos = *pos;
3210
3211 return iter;
3212}
3213
3214static void *t_mod_start(struct seq_file *m, loff_t *pos)
3215{
3216 struct ftrace_iterator *iter = m->private;
3217 void *p = NULL;
3218 loff_t l;
3219
3220 if (iter->func_pos > *pos)
3221 return NULL;
3222
3223 iter->mod_pos = iter->func_pos;
3224
3225 /* probes are only available if tr is set */
3226 if (!iter->tr)
3227 return NULL;
3228
3229 for (l = 0; l <= (*pos - iter->func_pos); ) {
3230 p = t_mod_next(m, &l);
3231 if (!p)
3232 break;
3233 }
3234 if (!p) {
3235 iter->flags &= ~FTRACE_ITER_MOD;
3236 return t_probe_start(m, pos);
3237 }
3238
3239 /* Only set this if we have an item */
3240 iter->flags |= FTRACE_ITER_MOD;
3241
3242 return iter;
3243}
3244
3245static int
3246t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3247{
3248 struct ftrace_mod_load *ftrace_mod;
3249 struct trace_array *tr = iter->tr;
3250
3251 if (WARN_ON_ONCE(!iter->mod_list) ||
3252 iter->mod_list == &tr->mod_trace ||
3253 iter->mod_list == &tr->mod_notrace)
3254 return -EIO;
3255
3256 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3257
3258 if (ftrace_mod->func)
3259 seq_printf(m, "%s", ftrace_mod->func);
3260 else
3261 seq_putc(m, '*');
3262
3263 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3264
3265 return 0;
3266}
3267
3268static void *
3269t_func_next(struct seq_file *m, loff_t *pos)
3270{
3271 struct ftrace_iterator *iter = m->private;
3272 struct dyn_ftrace *rec = NULL;
3273
3274 (*pos)++;
3275
3276 retry:
3277 if (iter->idx >= iter->pg->index) {
3278 if (iter->pg->next) {
3279 iter->pg = iter->pg->next;
3280 iter->idx = 0;
3281 goto retry;
3282 }
3283 } else {
3284 rec = &iter->pg->records[iter->idx++];
3285 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3286 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3287
3288 ((iter->flags & FTRACE_ITER_ENABLED) &&
3289 !(rec->flags & FTRACE_FL_ENABLED))) {
3290
3291 rec = NULL;
3292 goto retry;
3293 }
3294 }
3295
3296 if (!rec)
3297 return NULL;
3298
3299 iter->pos = iter->func_pos = *pos;
3300 iter->func = rec;
3301
3302 return iter;
3303}
3304
3305static void *
3306t_next(struct seq_file *m, void *v, loff_t *pos)
3307{
3308 struct ftrace_iterator *iter = m->private;
3309 loff_t l = *pos; /* t_probe_start() must use original pos */
3310 void *ret;
3311
3312 if (unlikely(ftrace_disabled))
3313 return NULL;
3314
3315 if (iter->flags & FTRACE_ITER_PROBE)
3316 return t_probe_next(m, pos);
3317
3318 if (iter->flags & FTRACE_ITER_MOD)
3319 return t_mod_next(m, pos);
3320
3321 if (iter->flags & FTRACE_ITER_PRINTALL) {
3322		/* next must increment pos, and t_mod_start() does not */
3323 (*pos)++;
3324 return t_mod_start(m, &l);
3325 }
3326
3327 ret = t_func_next(m, pos);
3328
3329 if (!ret)
3330 return t_mod_start(m, &l);
3331
3332 return ret;
3333}
3334
3335static void reset_iter_read(struct ftrace_iterator *iter)
3336{
3337 iter->pos = 0;
3338 iter->func_pos = 0;
3339 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3340}
3341
3342static void *t_start(struct seq_file *m, loff_t *pos)
3343{
3344 struct ftrace_iterator *iter = m->private;
3345 void *p = NULL;
3346 loff_t l;
3347
3348 mutex_lock(&ftrace_lock);
3349
3350 if (unlikely(ftrace_disabled))
3351 return NULL;
3352
3353 /*
3354 * If an lseek was done, then reset and start from beginning.
3355 */
3356 if (*pos < iter->pos)
3357 reset_iter_read(iter);
3358
3359 /*
3360 * For set_ftrace_filter reading, if we have the filter
3361	 * off, we can shortcut and just print out that all
3362 * functions are enabled.
3363 */
3364 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3365 ftrace_hash_empty(iter->hash)) {
3366 iter->func_pos = 1; /* Account for the message */
3367 if (*pos > 0)
3368 return t_mod_start(m, pos);
3369 iter->flags |= FTRACE_ITER_PRINTALL;
3370 /* reset in case of seek/pread */
3371 iter->flags &= ~FTRACE_ITER_PROBE;
3372 return iter;
3373 }
3374
3375 if (iter->flags & FTRACE_ITER_MOD)
3376 return t_mod_start(m, pos);
3377
3378 /*
3379 * Unfortunately, we need to restart at ftrace_pages_start
3380	 * every time we let go of the ftrace_lock. This is because
3381 * those pointers can change without the lock.
3382 */
3383 iter->pg = ftrace_pages_start;
3384 iter->idx = 0;
3385 for (l = 0; l <= *pos; ) {
3386 p = t_func_next(m, &l);
3387 if (!p)
3388 break;
3389 }
3390
3391 if (!p)
3392 return t_mod_start(m, pos);
3393
3394 return iter;
3395}
3396
3397static void t_stop(struct seq_file *m, void *p)
3398{
3399 mutex_unlock(&ftrace_lock);
3400}
3401
3402void * __weak
3403arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3404{
3405 return NULL;
3406}
3407
3408static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3409 struct dyn_ftrace *rec)
3410{
3411 void *ptr;
3412
3413 ptr = arch_ftrace_trampoline_func(ops, rec);
3414 if (ptr)
3415 seq_printf(m, " ->%pS", ptr);
3416}
3417
3418static int t_show(struct seq_file *m, void *v)
3419{
3420 struct ftrace_iterator *iter = m->private;
3421 struct dyn_ftrace *rec;
3422
3423 if (iter->flags & FTRACE_ITER_PROBE)
3424 return t_probe_show(m, iter);
3425
3426 if (iter->flags & FTRACE_ITER_MOD)
3427 return t_mod_show(m, iter);
3428
3429 if (iter->flags & FTRACE_ITER_PRINTALL) {
3430 if (iter->flags & FTRACE_ITER_NOTRACE)
3431 seq_puts(m, "#### no functions disabled ####\n");
3432 else
3433 seq_puts(m, "#### all functions enabled ####\n");
3434 return 0;
3435 }
3436
3437 rec = iter->func;
3438
3439 if (!rec)
3440 return 0;
3441
3442 seq_printf(m, "%ps", (void *)rec->ip);
3443 if (iter->flags & FTRACE_ITER_ENABLED) {
3444 struct ftrace_ops *ops;
3445
3446 seq_printf(m, " (%ld)%s%s",
3447 ftrace_rec_count(rec),
3448 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3449 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ");
3450 if (rec->flags & FTRACE_FL_TRAMP_EN) {
3451 ops = ftrace_find_tramp_ops_any(rec);
3452 if (ops) {
3453 do {
3454 seq_printf(m, "\ttramp: %pS (%pS)",
3455 (void *)ops->trampoline,
3456 (void *)ops->func);
3457 add_trampoline_func(m, ops, rec);
3458 ops = ftrace_find_tramp_ops_next(rec, ops);
3459 } while (ops);
3460 } else
3461 seq_puts(m, "\ttramp: ERROR!");
3462 } else {
3463 add_trampoline_func(m, NULL, rec);
3464 }
3465 }
3466
3467 seq_putc(m, '\n');
3468
3469 return 0;
3470}
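
/*
 * Illustrative shape of an enabled_functions line emitted by t_show()
 * above (the symbol names and count are made up):
 *
 *	vfs_read (1) R I	tramp: my_tramp+0x0/0x1000 (my_callback+0x0/0x10)
 *
 * i.e. the record's symbol, its ops ref count, " R" when the regs-saving
 * caller is in use, " I" when an IPMODIFY ops is attached, and the
 * trampoline (with its ops->func) when FTRACE_FL_TRAMP_EN is set.
 */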
3471
3472static const struct seq_operations show_ftrace_seq_ops = {
3473 .start = t_start,
3474 .next = t_next,
3475 .stop = t_stop,
3476 .show = t_show,
3477};
3478
3479static int
3480ftrace_avail_open(struct inode *inode, struct file *file)
3481{
3482 struct ftrace_iterator *iter;
3483
3484 if (unlikely(ftrace_disabled))
3485 return -ENODEV;
3486
3487 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3488 if (!iter)
3489 return -ENOMEM;
3490
3491 iter->pg = ftrace_pages_start;
3492 iter->ops = &global_ops;
3493
3494 return 0;
3495}
3496
3497static int
3498ftrace_enabled_open(struct inode *inode, struct file *file)
3499{
3500 struct ftrace_iterator *iter;
3501
3502 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3503 if (!iter)
3504 return -ENOMEM;
3505
3506 iter->pg = ftrace_pages_start;
3507 iter->flags = FTRACE_ITER_ENABLED;
3508 iter->ops = &global_ops;
3509
3510 return 0;
3511}
3512
3513/**
3514 * ftrace_regex_open - initialize function tracer filter files
3515 * @ops: The ftrace_ops that hold the hash filters
3516 * @flag: The type of filter to process
3517 * @inode: The inode, usually passed in to your open routine
3518 * @file: The file, usually passed in to your open routine
3519 *
3520 * ftrace_regex_open() initializes the filter files for the
3521 * @ops. Depending on @flag it may process the filter hash or
3522 * the notrace hash of @ops. With this called from the open
3523 * routine, you can use ftrace_filter_write() for the write
3524 * routine if @flag has FTRACE_ITER_FILTER set, or
3525 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3526 * tracing_lseek() should be used as the lseek routine, and
3527 * release must call ftrace_regex_release().
3528 */
3529int
3530ftrace_regex_open(struct ftrace_ops *ops, int flag,
3531 struct inode *inode, struct file *file)
3532{
3533 struct ftrace_iterator *iter;
3534 struct ftrace_hash *hash;
3535 struct list_head *mod_head;
3536 struct trace_array *tr = ops->private;
3537 int ret = 0;
3538
3539 ftrace_ops_init(ops);
3540
3541 if (unlikely(ftrace_disabled))
3542 return -ENODEV;
3543
3544 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3545 if (!iter)
3546 return -ENOMEM;
3547
3548 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3549 kfree(iter);
3550 return -ENOMEM;
3551 }
3552
3553 iter->ops = ops;
3554 iter->flags = flag;
3555 iter->tr = tr;
3556
3557 mutex_lock(&ops->func_hash->regex_lock);
3558
3559 if (flag & FTRACE_ITER_NOTRACE) {
3560 hash = ops->func_hash->notrace_hash;
3561 mod_head = tr ? &tr->mod_notrace : NULL;
3562 } else {
3563 hash = ops->func_hash->filter_hash;
3564 mod_head = tr ? &tr->mod_trace : NULL;
3565 }
3566
3567 iter->mod_list = mod_head;
3568
3569 if (file->f_mode & FMODE_WRITE) {
3570 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3571
3572 if (file->f_flags & O_TRUNC) {
3573 iter->hash = alloc_ftrace_hash(size_bits);
3574 clear_ftrace_mod_list(mod_head);
3575 } else {
3576 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3577 }
3578
3579 if (!iter->hash) {
3580 trace_parser_put(&iter->parser);
3581 kfree(iter);
3582 ret = -ENOMEM;
3583 goto out_unlock;
3584 }
3585 } else
3586 iter->hash = hash;
3587
3588 if (file->f_mode & FMODE_READ) {
3589 iter->pg = ftrace_pages_start;
3590
3591 ret = seq_open(file, &show_ftrace_seq_ops);
3592 if (!ret) {
3593 struct seq_file *m = file->private_data;
3594 m->private = iter;
3595 } else {
3596 /* Failed */
3597 free_ftrace_hash(iter->hash);
3598 trace_parser_put(&iter->parser);
3599 kfree(iter);
3600 }
3601 } else
3602 file->private_data = iter;
3603
3604 out_unlock:
3605 mutex_unlock(&ops->func_hash->regex_lock);
3606
3607 return ret;
3608}
3609
3610static int
3611ftrace_filter_open(struct inode *inode, struct file *file)
3612{
3613 struct ftrace_ops *ops = inode->i_private;
3614
3615 return ftrace_regex_open(ops,
3616 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3617 inode, file);
3618}
3619
3620static int
3621ftrace_notrace_open(struct inode *inode, struct file *file)
3622{
3623 struct ftrace_ops *ops = inode->i_private;
3624
3625 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3626 inode, file);
3627}
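
/*
 * Hedged sketch of how the open routines above are typically wired into
 * a file_operations (the real ftrace_filter_fops/ftrace_notrace_fops
 * definitions appear further down in this file; this only shows the
 * shape suggested by the ftrace_regex_open() kerneldoc):
 *
 *	static const struct file_operations example_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */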
3628
3629/* Type used to quickly search ftrace basic regexes (globs) from filter_parse_regex */
3630struct ftrace_glob {
3631 char *search;
3632 unsigned len;
3633 int type;
3634};
3635
3636/*
3637 * If symbols in an architecture don't correspond exactly to the user-visible
3638 * name of what they represent, it is possible to define this function to
3639 * perform the necessary adjustments.
3640 */
3641char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3642{
3643 return str;
3644}
3645
3646static int ftrace_match(char *str, struct ftrace_glob *g)
3647{
3648 int matched = 0;
3649 int slen;
3650
3651 str = arch_ftrace_match_adjust(str, g->search);
3652
3653 switch (g->type) {
3654 case MATCH_FULL:
3655 if (strcmp(str, g->search) == 0)
3656 matched = 1;
3657 break;
3658 case MATCH_FRONT_ONLY:
3659 if (strncmp(str, g->search, g->len) == 0)
3660 matched = 1;
3661 break;
3662 case MATCH_MIDDLE_ONLY:
3663 if (strstr(str, g->search))
3664 matched = 1;
3665 break;
3666 case MATCH_END_ONLY:
3667 slen = strlen(str);
3668 if (slen >= g->len &&
3669 memcmp(str + slen - g->len, g->search, g->len) == 0)
3670 matched = 1;
3671 break;
3672 case MATCH_GLOB:
3673 if (glob_match(g->search, str))
3674 matched = 1;
3675 break;
3676 }
3677
3678 return matched;
3679}
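
/*
 * Illustrative examples of how filter_parse_regex() maps user strings
 * onto the match types handled above (the exact classification is done
 * in filter_parse_regex(); these are just the common shapes):
 *
 *	"sched_switch"        -> MATCH_FULL         exact symbol name
 *	"sched_*"             -> MATCH_FRONT_ONLY   prefix match
 *	"*_lock"              -> MATCH_END_ONLY     suffix match
 *	"*sched*"             -> MATCH_MIDDLE_ONLY  substring match
 *	"mutex_*_slowpath"    -> MATCH_GLOB         full glob_match()
 *	"25"                  -> MATCH_INDEX        Nth record (see add_rec_by_index())
 */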
3680
3681static int
3682enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3683{
3684 struct ftrace_func_entry *entry;
3685 int ret = 0;
3686
3687 entry = ftrace_lookup_ip(hash, rec->ip);
3688 if (clear_filter) {
3689 /* Do nothing if it doesn't exist */
3690 if (!entry)
3691 return 0;
3692
3693 free_hash_entry(hash, entry);
3694 } else {
3695 /* Do nothing if it exists */
3696 if (entry)
3697 return 0;
3698
3699 ret = add_hash_entry(hash, rec->ip);
3700 }
3701 return ret;
3702}
3703
3704static int
3705add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
3706 int clear_filter)
3707{
3708 long index = simple_strtoul(func_g->search, NULL, 0);
3709 struct ftrace_page *pg;
3710 struct dyn_ftrace *rec;
3711
3712 /* The index starts at 1 */
3713 if (--index < 0)
3714 return 0;
3715
3716 do_for_each_ftrace_rec(pg, rec) {
3717 if (pg->index <= index) {
3718 index -= pg->index;
3719 /* this is a double loop, break goes to the next page */
3720 break;
3721 }
3722 rec = &pg->records[index];
3723 enter_record(hash, rec, clear_filter);
3724 return 1;
3725 } while_for_each_ftrace_rec();
3726 return 0;
3727}
3728
3729static int
3730ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3731 struct ftrace_glob *mod_g, int exclude_mod)
3732{
3733 char str[KSYM_SYMBOL_LEN];
3734 char *modname;
3735
3736 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3737
3738 if (mod_g) {
3739 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3740
3741 /* blank module name to match all modules */
3742 if (!mod_g->len) {
3743 /* blank module globbing: modname xor exclude_mod */
3744 if (!exclude_mod != !modname)
3745 goto func_match;
3746 return 0;
3747 }
3748
3749 /*
3750 * exclude_mod is set to trace everything but the given
3751 * module. If it is set and the module matches, then
3752		 * return 0. If it is not set, and the module doesn't match,
3753 * also return 0. Otherwise, check the function to see if
3754 * that matches.
3755 */
3756 if (!mod_matches == !exclude_mod)
3757 return 0;
3758func_match:
3759 /* blank search means to match all funcs in the mod */
3760 if (!func_g->len)
3761 return 1;
3762 }
3763
3764 return ftrace_match(str, func_g);
3765}
3766
3767static int
3768match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3769{
3770 struct ftrace_page *pg;
3771 struct dyn_ftrace *rec;
3772 struct ftrace_glob func_g = { .type = MATCH_FULL };
3773 struct ftrace_glob mod_g = { .type = MATCH_FULL };
3774 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3775 int exclude_mod = 0;
3776 int found = 0;
3777 int ret;
3778 int clear_filter = 0;
3779
3780 if (func) {
3781 func_g.type = filter_parse_regex(func, len, &func_g.search,
3782 &clear_filter);
3783 func_g.len = strlen(func_g.search);
3784 }
3785
3786 if (mod) {
3787 mod_g.type = filter_parse_regex(mod, strlen(mod),
3788 &mod_g.search, &exclude_mod);
3789 mod_g.len = strlen(mod_g.search);
3790 }
3791
3792 mutex_lock(&ftrace_lock);
3793
3794 if (unlikely(ftrace_disabled))
3795 goto out_unlock;
3796
3797 if (func_g.type == MATCH_INDEX) {
3798 found = add_rec_by_index(hash, &func_g, clear_filter);
3799 goto out_unlock;
3800 }
3801
3802 do_for_each_ftrace_rec(pg, rec) {
3803
3804 if (rec->flags & FTRACE_FL_DISABLED)
3805 continue;
3806
3807 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3808 ret = enter_record(hash, rec, clear_filter);
3809 if (ret < 0) {
3810 found = ret;
3811 goto out_unlock;
3812 }
3813 found = 1;
3814 }
3815 } while_for_each_ftrace_rec();
3816 out_unlock:
3817 mutex_unlock(&ftrace_lock);
3818
3819 return found;
3820}
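
/*
 * Illustrative examples of the strings that end up here via
 * set_ftrace_filter (func and mod arrive already split apart):
 *
 *	"vfs_read"           a single function
 *	"ext4_*:mod:ext4"    a glob limited to functions in the ext4 module
 *	"*:mod:ext4"         every function in the ext4 module
 *	"!ext4_*"            a leading '!' is parsed as "clear these entries"
 */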
3821
3822static int
3823ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3824{
3825 return match_records(hash, buff, len, NULL);
3826}
3827
3828static void ftrace_ops_update_code(struct ftrace_ops *ops,
3829 struct ftrace_ops_hash *old_hash)
3830{
3831 struct ftrace_ops *op;
3832
3833 if (!ftrace_enabled)
3834 return;
3835
3836 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3837 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3838 return;
3839 }
3840
3841 /*
3842 * If this is the shared global_ops filter, then we need to
3843	 * check if there is another ops that shares it and is enabled.
3844 * If so, we still need to run the modify code.
3845 */
3846 if (ops->func_hash != &global_ops.local_hash)
3847 return;
3848
3849 do_for_each_ftrace_op(op, ftrace_ops_list) {
3850 if (op->func_hash == &global_ops.local_hash &&
3851 op->flags & FTRACE_OPS_FL_ENABLED) {
3852 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3853 /* Only need to do this once */
3854 return;
3855 }
3856 } while_for_each_ftrace_op(op);
3857}
3858
3859static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3860 struct ftrace_hash **orig_hash,
3861 struct ftrace_hash *hash,
3862 int enable)
3863{
3864 struct ftrace_ops_hash old_hash_ops;
3865 struct ftrace_hash *old_hash;
3866 int ret;
3867
3868 old_hash = *orig_hash;
3869 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3870 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3871 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3872 if (!ret) {
3873 ftrace_ops_update_code(ops, &old_hash_ops);
3874 free_ftrace_hash_rcu(old_hash);
3875 }
3876 return ret;
3877}
3878
3879static bool module_exists(const char *module)
3880{
3881 /* All modules have the symbol __this_module */
3882 const char this_mod[] = "__this_module";
3883 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
3884 unsigned long val;
3885 int n;
3886
3887 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
3888
3889 if (n > sizeof(modname) - 1)
3890 return false;
3891
3892 val = module_kallsyms_lookup_name(modname);
3893 return val != 0;
3894}
3895
3896static int cache_mod(struct trace_array *tr,
3897 const char *func, char *module, int enable)
3898{
3899 struct ftrace_mod_load *ftrace_mod, *n;
3900 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
3901 int ret;
3902
3903 mutex_lock(&ftrace_lock);
3904
3905 /* We do not cache inverse filters */
3906 if (func[0] == '!') {
3907 func++;
3908 ret = -EINVAL;
3909
3910 /* Look to remove this hash */
3911 list_for_each_entry_safe(ftrace_mod, n, head, list) {
3912 if (strcmp(ftrace_mod->module, module) != 0)
3913 continue;
3914
3915 /* no func matches all */
3916 if (strcmp(func, "*") == 0 ||
3917 (ftrace_mod->func &&
3918 strcmp(ftrace_mod->func, func) == 0)) {
3919 ret = 0;
3920 free_ftrace_mod(ftrace_mod);
3921 continue;
3922 }
3923 }
3924 goto out;
3925 }
3926
3927 ret = -EINVAL;
3928 /* We only care about modules that have not been loaded yet */
3929 if (module_exists(module))
3930 goto out;
3931
3932 /* Save this string off, and execute it when the module is loaded */
3933 ret = ftrace_add_mod(tr, func, module, enable);
3934 out:
3935 mutex_unlock(&ftrace_lock);
3936
3937 return ret;
3938}
3939
3940static int
3941ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3942 int reset, int enable);
3943
3944#ifdef CONFIG_MODULES
3945static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
3946 char *mod, bool enable)
3947{
3948 struct ftrace_mod_load *ftrace_mod, *n;
3949 struct ftrace_hash **orig_hash, *new_hash;
3950 LIST_HEAD(process_mods);
3951 char *func;
3952 int ret;
3953
3954 mutex_lock(&ops->func_hash->regex_lock);
3955
3956 if (enable)
3957 orig_hash = &ops->func_hash->filter_hash;
3958 else
3959 orig_hash = &ops->func_hash->notrace_hash;
3960
3961 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
3962 *orig_hash);
3963 if (!new_hash)
3964 goto out; /* warn? */
3965
3966 mutex_lock(&ftrace_lock);
3967
3968 list_for_each_entry_safe(ftrace_mod, n, head, list) {
3969
3970 if (strcmp(ftrace_mod->module, mod) != 0)
3971 continue;
3972
3973 if (ftrace_mod->func)
3974 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
3975 else
3976 func = kstrdup("*", GFP_KERNEL);
3977
3978 if (!func) /* warn? */
3979 continue;
3980
3981 list_del(&ftrace_mod->list);
3982 list_add(&ftrace_mod->list, &process_mods);
3983
3984 /* Use the newly allocated func, as it may be "*" */
3985 kfree(ftrace_mod->func);
3986 ftrace_mod->func = func;
3987 }
3988
3989 mutex_unlock(&ftrace_lock);
3990
3991 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
3992
3993 func = ftrace_mod->func;
3994
3995 /* Grabs ftrace_lock, which is why we have this extra step */
3996 match_records(new_hash, func, strlen(func), mod);
3997 free_ftrace_mod(ftrace_mod);
3998 }
3999
4000 if (enable && list_empty(head))
4001 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4002
4003 mutex_lock(&ftrace_lock);
4004
4005 ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4006 new_hash, enable);
4007 mutex_unlock(&ftrace_lock);
4008
4009 out:
4010 mutex_unlock(&ops->func_hash->regex_lock);
4011
4012 free_ftrace_hash(new_hash);
4013}
4014
4015static void process_cached_mods(const char *mod_name)
4016{
4017 struct trace_array *tr;
4018 char *mod;
4019
4020 mod = kstrdup(mod_name, GFP_KERNEL);
4021 if (!mod)
4022 return;
4023
4024 mutex_lock(&trace_types_lock);
4025 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4026 if (!list_empty(&tr->mod_trace))
4027 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4028 if (!list_empty(&tr->mod_notrace))
4029 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4030 }
4031 mutex_unlock(&trace_types_lock);
4032
4033 kfree(mod);
4034}
4035#endif
4036
4037/*
4038 * We register the module command as a template to show others how
4039 * to register a command as well.
4040 */
4041
4042static int
4043ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4044 char *func_orig, char *cmd, char *module, int enable)
4045{
4046 char *func;
4047 int ret;
4048
4049 /* match_records() modifies func, and we need the original */
4050 func = kstrdup(func_orig, GFP_KERNEL);
4051 if (!func)
4052 return -ENOMEM;
4053
4054 /*
4055 * cmd == 'mod' because we only registered this func
4056 * for the 'mod' ftrace_func_command.
4057 * But if you register one func with multiple commands,
4058 * you can tell which command was used by the cmd
4059 * parameter.
4060 */
4061 ret = match_records(hash, func, strlen(func), module);
4062 kfree(func);
4063
4064 if (!ret)
4065 return cache_mod(tr, func_orig, module, enable);
4066 if (ret < 0)
4067 return ret;
4068 return 0;
4069}
4070
4071static struct ftrace_func_command ftrace_mod_cmd = {
4072 .name = "mod",
4073 .func = ftrace_mod_callback,
4074};
4075
4076static int __init ftrace_mod_cmd_init(void)
4077{
4078 return register_ftrace_command(&ftrace_mod_cmd);
4079}
4080core_initcall(ftrace_mod_cmd_init);
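
/*
 * Hedged sketch of following the template above for a new command
 * (my_cmd_callback and "mycmd" are made-up names). A registered command
 * is invoked for writes of the form "<func>:<cmd>[:<param>]" to
 * set_ftrace_filter/set_ftrace_notrace:
 *
 *	static int my_cmd_callback(struct trace_array *tr,
 *				   struct ftrace_hash *hash,
 *				   char *func, char *cmd,
 *				   char *param, int enable)
 *	{
 *		// select records into @hash based on @func and @param
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name = "mycmd",
 *		.func = my_cmd_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */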
4081
4082static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4083 struct ftrace_ops *op, struct pt_regs *pt_regs)
4084{
4085 struct ftrace_probe_ops *probe_ops;
4086 struct ftrace_func_probe *probe;
4087
4088 probe = container_of(op, struct ftrace_func_probe, ops);
4089 pr