1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * unlikely profiler |
4 | * |
5 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> |
6 | */ |
7 | #include <linux/kallsyms.h> |
8 | #include <linux/seq_file.h> |
9 | #include <linux/spinlock.h> |
10 | #include <linux/irqflags.h> |
11 | #include <linux/uaccess.h> |
12 | #include <linux/module.h> |
13 | #include <linux/ftrace.h> |
14 | #include <linux/hash.h> |
15 | #include <linux/fs.h> |
16 | #include <asm/local.h> |
17 | |
18 | #include "trace.h" |
19 | #include "trace_stat.h" |
20 | #include "trace_output.h" |
21 | |
22 | #ifdef CONFIG_BRANCH_TRACER |
23 | |
/* Forward declaration; the tracer itself is defined near the end of this file. */
static struct tracer branch_trace;
/* Non-zero while branch tracing is switched on (incremented per enable). */
static int branch_tracing_enabled __read_mostly;
/* Serializes transitions of branch_tracing_enabled and branch_tracer. */
static DEFINE_MUTEX(branch_tracing_mutex);

/* Trace array events are recorded into; set by enable_branch_tracing(). */
static struct trace_array *branch_tracer;
29 | |
/*
 * Record one likely()/unlikely() evaluation into the branch tracer's
 * ring buffer.
 * @f:      per-callsite record (func/file/line plus constant counter)
 * @val:    value the condition actually evaluated to
 * @expect: value the annotation predicted
 *
 * Guards against self-recursion via TRACE_BRANCH_BIT and runs with
 * IRQs disabled while touching the per-cpu buffer state.
 */
static void
probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
	struct trace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct trace_buffer *buffer;
	struct trace_array_cpu *data;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags;
	unsigned int trace_ctx;
	const char *p;

	/* Already inside this probe on this task: do not recurse. */
	if (current->trace_recursion & TRACE_BRANCH_BIT)
		return;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	/* No trace array yet: the branch tracer was never enabled. */
	if (unlikely(!tr))
		return;

	raw_local_irq_save(flags);
	current->trace_recursion |= TRACE_BRANCH_BIT;
	data = this_cpu_ptr(tr->array_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	trace_ctx = tracing_gen_ctx_flags(flags);
	buffer = tr->array_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->data.file + strlen(f->data.file);
	while (p >= f->data.file && *p != '/')
		p--;
	p++;

	/* strncpy() may not NUL-terminate; the explicit stores below do. */
	strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->constant = f->constant;
	entry->line = f->data.line;
	entry->correct = val == expect;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

 out:
	current->trace_recursion &= ~TRACE_BRANCH_BIT;
	raw_local_irq_restore(flags);
}
92 | |
93 | static inline |
94 | void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect) |
95 | { |
96 | if (!branch_tracing_enabled) |
97 | return; |
98 | |
99 | probe_likely_condition(f, val, expect); |
100 | } |
101 | |
/*
 * Point the branch probe at @tr and bump the enable count.
 * Always returns 0.
 */
int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}
116 | |
117 | void disable_branch_tracing(void) |
118 | { |
119 | mutex_lock(&branch_tracing_mutex); |
120 | |
121 | if (!branch_tracing_enabled) |
122 | goto out_unlock; |
123 | |
124 | branch_tracing_enabled--; |
125 | |
126 | out_unlock: |
127 | mutex_unlock(&branch_tracing_mutex); |
128 | } |
129 | |
/* Tracer ->init() hook: route the branch probe at @tr and enable it. */
static int branch_trace_init(struct trace_array *tr)
{
	int ret;

	ret = enable_branch_tracing(tr);
	return ret;
}
134 | |
/* Tracer ->reset() hook; @tr is implicit since only one instance is active. */
static void branch_trace_reset(struct trace_array *tr)
{
	disable_branch_tracing();
}
139 | |
140 | static enum print_line_t trace_branch_print(struct trace_iterator *iter, |
141 | int flags, struct trace_event *event) |
142 | { |
143 | struct trace_branch *field; |
144 | |
145 | trace_assign_type(field, iter->ent); |
146 | |
147 | trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n" , |
148 | field->correct ? " ok " : " MISS " , |
149 | field->func, |
150 | field->file, |
151 | field->line); |
152 | |
153 | return trace_handle_return(&iter->seq); |
154 | } |
155 | |
/* Emit the two-line column header shown above the branch tracer output. */
static void branch_print_header(struct seq_file *s)
{
	/* Column titles ... */
	seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT"
		    " FUNC:FILE:LINE\n");
	/* ... and the ruler line beneath them. */
	seq_puts(s, "# | | | | | "
		    " |\n");
}
163 | |
/* Output callbacks for TRACE_BRANCH entries (text rendering only). */
static struct trace_event_functions trace_branch_funcs = {
	.trace		= trace_branch_print,
};
167 | |
/* Binds the TRACE_BRANCH entry type to its print functions. */
static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.funcs		= &trace_branch_funcs,
};
172 | |
/* The "branch" tracer: records an event for every annotated branch hit. */
static struct tracer branch_trace __read_mostly =
{
	.name		= "branch" ,
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};
183 | |
184 | __init static int init_branch_tracer(void) |
185 | { |
186 | int ret; |
187 | |
188 | ret = register_trace_event(&trace_branch_event); |
189 | if (!ret) { |
190 | printk(KERN_WARNING "Warning: could not register " |
191 | "branch events\n" ); |
192 | return 1; |
193 | } |
194 | return register_tracer(&branch_trace); |
195 | } |
196 | core_initcall(init_branch_tracer); |
197 | |
198 | #else |
/* No-op stub used when CONFIG_BRANCH_TRACER is disabled. */
static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
}
203 | #endif /* CONFIG_BRANCH_TRACER */ |
204 | |
/*
 * Update the profile counters for one annotated branch.
 * @f:           per-callsite record (counters plus file/func/line)
 * @val:         value the condition actually evaluated to
 * @expect:      value the likely()/unlikely() annotation predicted
 * @is_constant: non-zero when the condition was found to be constant
 *
 * Exported so annotated branches in modules can call it too.
 */
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant)
{
	/* Preserve any user-access state across the counter updates. */
	unsigned long flags = user_access_save();

	/* A constant is always correct */
	if (is_constant) {
		f->constant++;
		val = expect;
	}
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->data.correct++;
	else
		f->data.incorrect++;

	user_access_restore(flags);
}
EXPORT_SYMBOL(ftrace_likely_update);
232 | |
233 | extern unsigned long __start_annotated_branch_profile[]; |
234 | extern unsigned long __stop_annotated_branch_profile[]; |
235 | |
/*
 * Header for the "branch_annotated" stat file; column layout matches
 * annotate_branch_stat_show().
 *
 * Note: the function name was lost in this copy of the file (it is
 * referenced by annotated_branch_stats.stat_headers) and a stray "s:"
 * parameter-hint artifact had been pasted into the seq_puts() call;
 * both are restored/removed here.
 */
static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_puts(m, " correct incorrect % "
		    " Function "
		    " File Line\n"
		    " ------- --------- - "
		    " -------- "
		    " ---- ----\n");
	return 0;
}
246 | |
247 | static inline long get_incorrect_percent(const struct ftrace_branch_data *p) |
248 | { |
249 | long percent; |
250 | |
251 | if (p->correct) { |
252 | percent = p->incorrect * 100; |
253 | percent /= p->correct + p->incorrect; |
254 | } else |
255 | percent = p->incorrect ? 100 : -1; |
256 | |
257 | return percent; |
258 | } |
259 | |
260 | static const char *branch_stat_process_file(struct ftrace_branch_data *p) |
261 | { |
262 | const char *f; |
263 | |
264 | /* Only print the file, not the path */ |
265 | f = p->file + strlen(p->file); |
266 | while (f >= p->file && *f != '/') |
267 | f--; |
268 | return ++f; |
269 | } |
270 | |
271 | static void branch_stat_show(struct seq_file *m, |
272 | struct ftrace_branch_data *p, const char *f) |
273 | { |
274 | long percent; |
275 | |
276 | /* |
277 | * The miss is overlayed on correct, and hit on incorrect. |
278 | */ |
279 | percent = get_incorrect_percent(p); |
280 | |
281 | if (percent < 0) |
282 | seq_puts(m, s: " X " ); |
283 | else |
284 | seq_printf(m, fmt: "%3ld " , percent); |
285 | |
286 | seq_printf(m, fmt: "%-30.30s %-20.20s %d\n" , p->func, f, p->line); |
287 | } |
288 | |
289 | static int branch_stat_show_normal(struct seq_file *m, |
290 | struct ftrace_branch_data *p, const char *f) |
291 | { |
292 | seq_printf(m, fmt: "%8lu %8lu " , p->correct, p->incorrect); |
293 | branch_stat_show(m, p, f); |
294 | return 0; |
295 | } |
296 | |
297 | static int annotate_branch_stat_show(struct seq_file *m, void *v) |
298 | { |
299 | struct ftrace_likely_data *p = v; |
300 | const char *f; |
301 | int l; |
302 | |
303 | f = branch_stat_process_file(p: &p->data); |
304 | |
305 | if (!p->constant) |
306 | return branch_stat_show_normal(m, p: &p->data, f); |
307 | |
308 | l = snprintf(NULL, size: 0, fmt: "/%lu" , p->constant); |
309 | l = l > 8 ? 0 : 8 - l; |
310 | |
311 | seq_printf(m, fmt: "%8lu/%lu %*lu " , |
312 | p->data.correct, p->constant, l, p->data.incorrect); |
313 | branch_stat_show(m, p: &p->data, f); |
314 | return 0; |
315 | } |
316 | |
/* Iterator start: first entry of the linker-built annotated-branch section. */
static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}
321 | |
322 | static void * |
323 | annotated_branch_stat_next(void *v, int idx) |
324 | { |
325 | struct ftrace_likely_data *p = v; |
326 | |
327 | ++p; |
328 | |
329 | if ((void *)p >= (void *)__stop_annotated_branch_profile) |
330 | return NULL; |
331 | |
332 | return p; |
333 | } |
334 | |
335 | static int annotated_branch_stat_cmp(const void *p1, const void *p2) |
336 | { |
337 | const struct ftrace_branch_data *a = p1; |
338 | const struct ftrace_branch_data *b = p2; |
339 | |
340 | long percent_a, percent_b; |
341 | |
342 | percent_a = get_incorrect_percent(p: a); |
343 | percent_b = get_incorrect_percent(p: b); |
344 | |
345 | if (percent_a < percent_b) |
346 | return -1; |
347 | if (percent_a > percent_b) |
348 | return 1; |
349 | |
350 | if (a->incorrect < b->incorrect) |
351 | return -1; |
352 | if (a->incorrect > b->incorrect) |
353 | return 1; |
354 | |
355 | /* |
356 | * Since the above shows worse (incorrect) cases |
357 | * first, we continue that by showing best (correct) |
358 | * cases last. |
359 | */ |
360 | if (a->correct > b->correct) |
361 | return -1; |
362 | if (a->correct < b->correct) |
363 | return 1; |
364 | |
365 | return 0; |
366 | } |
367 | |
/* Hooks for the "branch_annotated" file in the stat tracer framework. */
static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated" ,
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = annotate_branch_stat_show
};
376 | |
377 | __init static int init_annotated_branch_stats(void) |
378 | { |
379 | int ret; |
380 | |
381 | ret = register_stat_tracer(trace: &annotated_branch_stats); |
382 | if (!ret) { |
383 | printk(KERN_WARNING "Warning: could not register " |
384 | "annotated branches stats\n" ); |
385 | return 1; |
386 | } |
387 | return 0; |
388 | } |
389 | fs_initcall(init_annotated_branch_stats); |
390 | |
391 | #ifdef CONFIG_PROFILE_ALL_BRANCHES |
392 | |
393 | extern unsigned long __start_branch_profile[]; |
394 | extern unsigned long __stop_branch_profile[]; |
395 | |
/* Header for the "branch_all" stat file; layout matches all_branch_stat_show(). */
static int all_branch_stat_headers(struct seq_file *m)
{
	/* Column titles ... */
	seq_puts(m, " miss hit % "
		    " Function "
		    " File Line\n");
	/* ... and the ruler line beneath them. */
	seq_puts(m, " ------- --------- - "
		    " -------- "
		    " ---- ----\n");
	return 0;
}
406 | |
/* Iterator start: first entry of the linker-built all-branches section. */
static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}
411 | |
412 | static void * |
413 | all_branch_stat_next(void *v, int idx) |
414 | { |
415 | struct ftrace_branch_data *p = v; |
416 | |
417 | ++p; |
418 | |
419 | if ((void *)p >= (void *)__stop_branch_profile) |
420 | return NULL; |
421 | |
422 | return p; |
423 | } |
424 | |
/* ->stat_show() for "branch_all": plain two-counter row, basename only. */
static int all_branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;

	return branch_stat_show_normal(m, p, branch_stat_process_file(p));
}
433 | |
/* Hooks for the "branch_all" file (unsorted: no .stat_cmp). */
static struct tracer_stat all_branch_stats = {
	.name = "branch_all" ,
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = all_branch_stat_show
};
441 | |
442 | __init static int all_annotated_branch_stats(void) |
443 | { |
444 | int ret; |
445 | |
446 | ret = register_stat_tracer(&all_branch_stats); |
447 | if (!ret) { |
448 | printk(KERN_WARNING "Warning: could not register " |
449 | "all branches stats\n" ); |
450 | return 1; |
451 | } |
452 | return 0; |
453 | } |
454 | fs_initcall(all_annotated_branch_stats); |
455 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
456 | |