// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"

#define MAX_STACKS  32
#define MAX_PROC  4096
/* we don't need an actual timestamp, just want the samples to sort last */
#define OFF_CPU_TIMESTAMP  (~0ull << 32)
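/*
 * The upper 32 bits are all set, so these synthesized samples sort after
 * any real timestamp; off_cpu_write() bumps the low bits once per record
 * to keep the records themselves in a stable order.
 */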

static struct off_cpu_bpf *skel;

struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

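/*
 * Buffer for one synthesized sample: the same storage is viewed either as
 * the perf_event_header at the front or as an array of u64 slots that
 * off_cpu_write() fills in sample_type order.
 */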
union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

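/*
 * Add a software BPF-output event named OFFCPU_EVENT to the evlist.  No
 * samples are recorded for it during the session; off_cpu_write()
 * synthesizes them at the end, so the attr set up here defines the sample
 * layout that the report side will parse.
 */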
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack trace */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

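/*
 * "record_start" perf hook: a workload forked by perf record only gets its
 * pid at start time, so install it in the task filter here before enabling
 * the BPF collection.
 */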
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}

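/* "record_end" perf hook: stop collecting and tear down the BPF skeleton */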
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* v5.18 kernels added a prev_state arg, so check the tracepoint signature */
static void check_sched_switch_args(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

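	/*
	 * Chase the chain: the "btf_trace_sched_switch" typedef resolves to
	 * a pointer to the trace function prototype.  Since btf_trace
	 * prototypes carry one extra context argument, a vlen of 5 means the
	 * tracepoint itself takes four arguments, i.e. it already passes
	 * prev_state.
	 */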
	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		goto cleanup;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		goto cleanup;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		goto cleanup;

	t3 = btf__type_by_id(btf, t2->type);
	/* btf_trace func proto has one more argument for the context */
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
cleanup:
	btf__free(btf);
}

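/*
 * Prepare everything before the record session starts: open the BPF
 * skeleton, size the filter maps from the target, load and populate them,
 * attach the programs and register the record start/end hooks.
 */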
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;
	struct strlist *pid_slist = NULL;
	struct str_node *pos;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target->pid) {
		pid_slist = strlist__new(target->pid, NULL);
		if (!pid_slist) {
			pr_err("Failed to create a strlist for pid\n");
			return -1;
		}

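		/* first pass: count the valid pids to size the task filter map */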
		ntasks = 0;
		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			ntasks++;
		}

		if (ntasks < MAX_PROC)
			ntasks = MAX_PROC;

		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__none(target)) {
		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

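	/* BPF map memory may be charged against RLIMIT_MEMLOCK, so raise it before loading */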
	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target->pid) {
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			u32 tgid;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			tgid = pid;
			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
		}
	} else if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

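/*
 * Called at the end of the session: drain the off_cpu BPF map and convert
 * each entry into a PERF_RECORD_SAMPLE written directly to the perf.data
 * file, using the accumulated off-cpu time as the sample period.  Returns
 * the number of bytes written.
 */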
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("not supported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

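	/*
	 * Fields of a sample must appear in the exact order the kernel would
	 * emit them for this sample_type; that is the order of the checks
	 * below.
	 */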
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;  /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0;  /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase the dummy timestamp so later samples sort after this one */
		tstamp++;
	}
	return bytes;
}