1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <dirent.h> |
3 | #include <errno.h> |
4 | #include <limits.h> |
5 | #include <stdbool.h> |
6 | #include <stdlib.h> |
7 | #include <stdio.h> |
8 | #include <sys/types.h> |
9 | #include <sys/stat.h> |
10 | #include <unistd.h> |
11 | #include "string2.h" |
12 | #include "strlist.h" |
13 | #include <string.h> |
14 | #include <api/fs/fs.h> |
15 | #include <linux/string.h> |
16 | #include <linux/zalloc.h> |
17 | #include "asm/bug.h" |
18 | #include "thread_map.h" |
19 | #include "debug.h" |
20 | #include "event.h" |
21 | #include <internal/threadmap.h> |
22 | |
23 | /* Skip "." and ".." directories */ |
/* scandir() filter: keep every entry whose name does not begin with '.'. */
static int filter(const struct dirent *dir)
{
	return dir->d_name[0] != '.';
}
31 | |
/* Allocate a fresh (unpopulated) thread map with room for __nr entries. */
#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
33 | |
34 | struct perf_thread_map *thread_map__new_by_pid(pid_t pid) |
35 | { |
36 | struct perf_thread_map *threads; |
37 | char name[256]; |
38 | int items; |
39 | struct dirent **namelist = NULL; |
40 | int i; |
41 | |
42 | sprintf(buf: name, fmt: "/proc/%d/task" , pid); |
43 | items = scandir(name, &namelist, filter, NULL); |
44 | if (items <= 0) |
45 | return NULL; |
46 | |
47 | threads = thread_map__alloc(items); |
48 | if (threads != NULL) { |
49 | for (i = 0; i < items; i++) |
50 | perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name)); |
51 | threads->nr = items; |
52 | refcount_set(&threads->refcnt, 1); |
53 | } |
54 | |
55 | for (i=0; i<items; i++) |
56 | zfree(&namelist[i]); |
57 | free(namelist); |
58 | |
59 | return threads; |
60 | } |
61 | |
62 | struct perf_thread_map *thread_map__new_by_tid(pid_t tid) |
63 | { |
64 | struct perf_thread_map *threads = thread_map__alloc(1); |
65 | |
66 | if (threads != NULL) { |
67 | perf_thread_map__set_pid(threads, 0, tid); |
68 | threads->nr = 1; |
69 | refcount_set(&threads->refcnt, 1); |
70 | } |
71 | |
72 | return threads; |
73 | } |
74 | |
75 | static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid) |
76 | { |
77 | DIR *proc; |
78 | int max_threads = 32, items, i; |
79 | char path[NAME_MAX + 1 + 6]; |
80 | struct dirent *dirent, **namelist = NULL; |
81 | struct perf_thread_map *threads = thread_map__alloc(max_threads); |
82 | |
83 | if (threads == NULL) |
84 | goto out; |
85 | |
86 | proc = opendir("/proc" ); |
87 | if (proc == NULL) |
88 | goto out_free_threads; |
89 | |
90 | threads->nr = 0; |
91 | refcount_set(&threads->refcnt, 1); |
92 | |
93 | while ((dirent = readdir(proc)) != NULL) { |
94 | char *end; |
95 | bool grow = false; |
96 | pid_t pid = strtol(dirent->d_name, &end, 10); |
97 | |
98 | if (*end) /* only interested in proper numerical dirents */ |
99 | continue; |
100 | |
101 | snprintf(buf: path, size: sizeof(path), fmt: "/proc/%s" , dirent->d_name); |
102 | |
103 | if (uid != UINT_MAX) { |
104 | struct stat st; |
105 | |
106 | if (stat(path, &st) != 0 || st.st_uid != uid) |
107 | continue; |
108 | } |
109 | |
110 | snprintf(buf: path, size: sizeof(path), fmt: "/proc/%d/task" , pid); |
111 | items = scandir(path, &namelist, filter, NULL); |
112 | if (items <= 0) { |
113 | pr_debug("scandir for %d returned empty, skipping\n" , pid); |
114 | continue; |
115 | } |
116 | while (threads->nr + items >= max_threads) { |
117 | max_threads *= 2; |
118 | grow = true; |
119 | } |
120 | |
121 | if (grow) { |
122 | struct perf_thread_map *tmp; |
123 | |
124 | tmp = perf_thread_map__realloc(threads, max_threads); |
125 | if (tmp == NULL) |
126 | goto out_free_namelist; |
127 | |
128 | threads = tmp; |
129 | } |
130 | |
131 | for (i = 0; i < items; i++) { |
132 | perf_thread_map__set_pid(threads, threads->nr + i, |
133 | atoi(namelist[i]->d_name)); |
134 | } |
135 | |
136 | for (i = 0; i < items; i++) |
137 | zfree(&namelist[i]); |
138 | free(namelist); |
139 | |
140 | threads->nr += items; |
141 | } |
142 | |
143 | out_closedir: |
144 | closedir(proc); |
145 | out: |
146 | return threads; |
147 | |
148 | out_free_threads: |
149 | free(threads); |
150 | return NULL; |
151 | |
152 | out_free_namelist: |
153 | for (i = 0; i < items; i++) |
154 | zfree(&namelist[i]); |
155 | free(namelist); |
156 | zfree(&threads); |
157 | goto out_closedir; |
158 | } |
159 | |
/* All threads of all processes: run the /proc walk with no uid filter. */
struct perf_thread_map *thread_map__new_all_cpus(void)
{
	const uid_t no_uid_filter = UINT_MAX;

	return __thread_map__new_all_cpus(no_uid_filter);
}
164 | |
/* All threads of every process owned by @uid. */
struct perf_thread_map *thread_map__new_by_uid(uid_t uid)
{
	struct perf_thread_map *map = __thread_map__new_all_cpus(uid);

	return map;
}
169 | |
/*
 * Dispatch to the right constructor: an explicit pid wins; otherwise a
 * uid filter applies only when no tid was given; otherwise map the tid
 * (which may be -1, yielding a dummy-ish single entry).
 */
struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	return (tid == -1 && uid != UINT_MAX) ?
		thread_map__new_by_uid(uid) :
		thread_map__new_by_tid(tid);
}
180 | |
181 | static struct perf_thread_map *thread_map__new_by_pid_str(const char *pid_str) |
182 | { |
183 | struct perf_thread_map *threads = NULL, *nt; |
184 | char name[256]; |
185 | int items, total_tasks = 0; |
186 | struct dirent **namelist = NULL; |
187 | int i, j = 0; |
188 | pid_t pid, prev_pid = INT_MAX; |
189 | char *end_ptr; |
190 | struct str_node *pos; |
191 | struct strlist_config slist_config = { .dont_dupstr = true, }; |
192 | struct strlist *slist = strlist__new(slist: pid_str, config: &slist_config); |
193 | |
194 | if (!slist) |
195 | return NULL; |
196 | |
197 | strlist__for_each_entry(pos, slist) { |
198 | pid = strtol(pos->s, &end_ptr, 10); |
199 | |
200 | if (pid == INT_MIN || pid == INT_MAX || |
201 | (*end_ptr != '\0' && *end_ptr != ',')) |
202 | goto out_free_threads; |
203 | |
204 | if (pid == prev_pid) |
205 | continue; |
206 | |
207 | sprintf(buf: name, fmt: "/proc/%d/task" , pid); |
208 | items = scandir(name, &namelist, filter, NULL); |
209 | if (items <= 0) |
210 | goto out_free_threads; |
211 | |
212 | total_tasks += items; |
213 | nt = perf_thread_map__realloc(threads, total_tasks); |
214 | if (nt == NULL) |
215 | goto out_free_namelist; |
216 | |
217 | threads = nt; |
218 | |
219 | for (i = 0; i < items; i++) { |
220 | perf_thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name)); |
221 | zfree(&namelist[i]); |
222 | } |
223 | threads->nr = total_tasks; |
224 | free(namelist); |
225 | } |
226 | |
227 | out: |
228 | strlist__delete(slist); |
229 | if (threads) |
230 | refcount_set(&threads->refcnt, 1); |
231 | return threads; |
232 | |
233 | out_free_namelist: |
234 | for (i = 0; i < items; i++) |
235 | zfree(&namelist[i]); |
236 | free(namelist); |
237 | |
238 | out_free_threads: |
239 | zfree(&threads); |
240 | goto out; |
241 | } |
242 | |
243 | struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str) |
244 | { |
245 | struct perf_thread_map *threads = NULL, *nt; |
246 | int ntasks = 0; |
247 | pid_t tid, prev_tid = INT_MAX; |
248 | char *end_ptr; |
249 | struct str_node *pos; |
250 | struct strlist_config slist_config = { .dont_dupstr = true, }; |
251 | struct strlist *slist; |
252 | |
253 | /* perf-stat expects threads to be generated even if tid not given */ |
254 | if (!tid_str) |
255 | return perf_thread_map__new_dummy(); |
256 | |
257 | slist = strlist__new(slist: tid_str, config: &slist_config); |
258 | if (!slist) |
259 | return NULL; |
260 | |
261 | strlist__for_each_entry(pos, slist) { |
262 | tid = strtol(pos->s, &end_ptr, 10); |
263 | |
264 | if (tid == INT_MIN || tid == INT_MAX || |
265 | (*end_ptr != '\0' && *end_ptr != ',')) |
266 | goto out_free_threads; |
267 | |
268 | if (tid == prev_tid) |
269 | continue; |
270 | |
271 | ntasks++; |
272 | nt = perf_thread_map__realloc(threads, ntasks); |
273 | |
274 | if (nt == NULL) |
275 | goto out_free_threads; |
276 | |
277 | threads = nt; |
278 | perf_thread_map__set_pid(threads, ntasks - 1, tid); |
279 | threads->nr = ntasks; |
280 | } |
281 | out: |
282 | strlist__delete(slist); |
283 | if (threads) |
284 | refcount_set(&threads->refcnt, 1); |
285 | return threads; |
286 | |
287 | out_free_threads: |
288 | zfree(&threads); |
289 | goto out; |
290 | } |
291 | |
/*
 * String-driven constructor used by tool option parsing: a pid list
 * wins; otherwise a uid filter applies when no tid string was given;
 * otherwise @all_threads maps the whole system; finally fall back to
 * the tid list (NULL tid yields a dummy map).
 */
struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid, bool all_threads)
{
	/* Fixed: stray IDE inlay-hint tokens made these calls invalid C. */
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	if (all_threads)
		return thread_map__new_all_cpus();

	return thread_map__new_by_tid_str(tid);
}
306 | |
307 | size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp) |
308 | { |
309 | int i; |
310 | size_t printed = fprintf(fp, "%d thread%s: " , |
311 | threads->nr, threads->nr > 1 ? "s" : "" ); |
312 | for (i = 0; i < threads->nr; ++i) |
313 | printed += fprintf(fp, "%s%d" , i ? ", " : "" , perf_thread_map__pid(threads, i)); |
314 | |
315 | return printed + fprintf(fp, "\n" ); |
316 | } |
317 | |
/*
 * Read /proc/<pid>/comm into a freshly allocated, trimmed string.
 * On success *comm is owned by the caller; returns 0 or a negative
 * error code.
 */
static int get_comm(char **comm, pid_t pid)
{
	char *path = NULL;
	size_t len;
	int ret;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	ret = filename__read_str(path, comm, &len);
	free(path);

	if (ret)
		return ret;

	/*
	 * We're reading 16 bytes, while filename__read_str
	 * allocates data per BUFSIZ bytes, so we can safely
	 * mark the end of the string.
	 */
	(*comm)[len] = 0;
	strim(*comm);

	return 0;
}
341 | |
342 | static void comm_init(struct perf_thread_map *map, int i) |
343 | { |
344 | pid_t pid = perf_thread_map__pid(map, i); |
345 | char *comm = NULL; |
346 | |
347 | /* dummy pid comm initialization */ |
348 | if (pid == -1) { |
349 | map->map[i].comm = strdup("dummy" ); |
350 | return; |
351 | } |
352 | |
353 | /* |
354 | * The comm name is like extra bonus ;-), |
355 | * so just warn if we fail for any reason. |
356 | */ |
357 | if (get_comm(comm: &comm, pid)) |
358 | pr_warning("Couldn't resolve comm name for pid %d\n" , pid); |
359 | |
360 | map->map[i].comm = comm; |
361 | } |
362 | |
363 | void thread_map__read_comms(struct perf_thread_map *threads) |
364 | { |
365 | int i; |
366 | |
367 | for (i = 0; i < threads->nr; ++i) |
368 | comm_init(map: threads, i); |
369 | } |
370 | |
371 | static void thread_map__copy_event(struct perf_thread_map *threads, |
372 | struct perf_record_thread_map *event) |
373 | { |
374 | unsigned i; |
375 | |
376 | threads->nr = (int) event->nr; |
377 | |
378 | for (i = 0; i < event->nr; i++) { |
379 | perf_thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid); |
380 | threads->map[i].comm = strndup(event->entries[i].comm, 16); |
381 | } |
382 | |
383 | refcount_set(&threads->refcnt, 1); |
384 | } |
385 | |
386 | struct perf_thread_map *thread_map__new_event(struct perf_record_thread_map *event) |
387 | { |
388 | struct perf_thread_map *threads; |
389 | |
390 | threads = thread_map__alloc(event->nr); |
391 | if (threads) |
392 | thread_map__copy_event(threads, event); |
393 | |
394 | return threads; |
395 | } |
396 | |
397 | bool thread_map__has(struct perf_thread_map *threads, pid_t pid) |
398 | { |
399 | int i; |
400 | |
401 | for (i = 0; i < threads->nr; ++i) { |
402 | if (threads->map[i].pid == pid) |
403 | return true; |
404 | } |
405 | |
406 | return false; |
407 | } |
408 | |
409 | int thread_map__remove(struct perf_thread_map *threads, int idx) |
410 | { |
411 | int i; |
412 | |
413 | if (threads->nr < 1) |
414 | return -EINVAL; |
415 | |
416 | if (idx >= threads->nr) |
417 | return -EINVAL; |
418 | |
419 | /* |
420 | * Free the 'idx' item and shift the rest up. |
421 | */ |
422 | zfree(&threads->map[idx].comm); |
423 | |
424 | for (i = idx; i < threads->nr - 1; i++) |
425 | threads->map[i] = threads->map[i + 1]; |
426 | |
427 | threads->nr--; |
428 | return 0; |
429 | } |
430 | |