/* Profile heap and stack memory usage of running program.
   Copyright (C) 1998-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <atomic.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/time.h>

#include <hp-timing.h>
#include <machine-sp.h>
#include <stackinfo.h>  /* For _STACK_GROWS_UP */

/* Pointers to the real functions.  These are determined using `dlsym'
   when really needed.  */
static void *(*mallocp) (size_t);
static void *(*reallocp) (void *, size_t);
static void *(*callocp) (size_t, size_t);
static void (*freep) (void *);

static void *(*mmapp) (void *, size_t, int, int, int, off_t);
static void *(*mmap64p) (void *, size_t, int, int, int, off64_t);
static int (*munmapp) (void *, size_t);
static void *(*mremapp) (void *, size_t, size_t, int, void *);

enum
{
  idx_malloc = 0,
  idx_realloc,
  idx_calloc,
  idx_free,
  idx_mmap_r,
  idx_mmap_w,
  idx_mmap_a,
  idx_mremap,
  idx_munmap,
  idx_last
};


struct header
{
  size_t length;
  size_t magic;
};

#define MAGIC 0xfeedbeaf

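/* Accounting scheme (informal summary of the code below): every block
   handed out by the malloc/realloc/calloc replacements is preceded by a
   `struct header' that records the requested size and is tagged with
   MAGIC.  `free' and `realloc' use that tag to recognize blocks that were
   allocated through this profiler; anything else is passed straight to
   the real implementation.

   The counters below are indexed by the idx_* constants.  `histogram'
   counts requests below 64 KiB in 16-byte bins and `large' counts the
   rest; `peak_use' holds the maxima for heap, stack and heap+stack, and
   the per-thread `start_sp' remembers each thread's initial stack
   pointer so the stack depth can be estimated.  */
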
static unsigned long int calls[idx_last];
static unsigned long int failed[idx_last];
static size_t total[idx_last];
static size_t grand_total;
static unsigned long int histogram[65536 / 16];
static unsigned long int large;
static unsigned long int calls_total;
static unsigned long int inplace;
static unsigned long int decreasing;
static unsigned long int realloc_free;
static unsigned long int inplace_mremap;
static unsigned long int decreasing_mremap;
static size_t current_heap;
static size_t peak_use[3];
static __thread uintptr_t start_sp;

/* A few macros to make the source more readable.  */
#define peak_heap   peak_use[0]
#define peak_stack  peak_use[1]
#define peak_total  peak_use[2]

#define DEFAULT_BUFFER_SIZE  32768
static size_t buffer_size;

static int fd = -1;

static bool not_me;
static int initialized;
static bool trace_mmap;
extern const char *__progname;

struct entry
{
  uint64_t heap;
  uint64_t stack;
  uint32_t time_low;
  uint32_t time_high;
};

static struct entry buffer[2 * DEFAULT_BUFFER_SIZE];
static uint32_t buffer_cnt;
static struct entry first;
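
/* Format of the output file: it begins with two `struct entry' records
   that the destructor later overwrites with the grand total and the
   heap/stack peaks, followed by one record per sampled event carrying
   the current heap size, stack size and a timestamp (the CPU timestamp
   counter when HP_TIMING_INLINE is available, microseconds otherwise).
   The `memusagestat' tool shipped with glibc can render this data as a
   graph.  */
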

static void
gettime (struct entry *e)
{
#if HP_TIMING_INLINE
  hp_timing_t now;
  HP_TIMING_NOW (now);
  e->time_low = now & 0xffffffff;
  e->time_high = now >> 32;
#else
  struct __timespec64 now;
  uint64_t usecs;
  __clock_gettime64 (CLOCK_REALTIME, &now);
  usecs = (uint64_t) now.tv_nsec / 1000 + (uint64_t) now.tv_sec * 1000000;
  e->time_low = usecs & 0xffffffff;
  e->time_high = usecs >> 32;
#endif
}

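/* update_data below keeps the event log using a simple double-buffering
   scheme: `buffer' has room for 2 * buffer_size entries and `buffer_cnt'
   is bumped atomically for every sample.  Whenever one half fills up it
   is written out while other threads keep storing samples in the other
   half, so logging never needs a lock.  */
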
/* Update the global data after a successful function call.  */
static void
update_data (struct header *result, size_t len, size_t old_len)
{
  if (result != NULL)
    {
      /* Record the information we need and mark the block using a
         magic number.  */
      result->length = len;
      result->magic = MAGIC;
    }

  /* Compute current heap usage and compare it with the maximum value.  */
  size_t heap
    = catomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
  catomic_max (&peak_heap, heap);

  /* Compute current stack usage and compare it with the maximum
     value.  The base stack pointer might not be set if this is not
     the main thread and it is the first call to any of these
     functions.  */
  if (__glibc_unlikely (!start_sp))
    start_sp = __thread_stack_pointer ();

  uintptr_t sp = __thread_stack_pointer ();
#ifdef _STACK_GROWS_UP
  /* This can happen in threads where we didn't catch the thread's
     stack early enough.  */
  if (__glibc_unlikely (sp < start_sp))
    start_sp = sp;
  size_t current_stack = sp - start_sp;
#else
  /* This can happen in threads where we didn't catch the thread's
     stack early enough.  */
  if (__glibc_unlikely (sp > start_sp))
    start_sp = sp;
  size_t current_stack = start_sp - sp;
#endif
  catomic_max (&peak_stack, current_stack);

  /* Add up heap and stack usage and compare it with the maximum value.  */
  catomic_max (&peak_total, heap + current_stack);

  /* Store the value only if we are writing to a file.  */
  if (fd != -1)
    {
      uint32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
      if (idx + 1 >= 2 * buffer_size)
        {
          /* We try to reset the counter to the correct range.  If
             this fails because of another thread increasing the
             counter it does not matter since that thread will take
             care of the correction.  */
          uint32_t reset = (idx + 1) % (2 * buffer_size);
          catomic_compare_and_exchange_val_acq (&buffer_cnt, reset, idx + 1);
          if (idx >= 2 * buffer_size)
            idx = reset - 1;
        }
      assert (idx < 2 * DEFAULT_BUFFER_SIZE);

      buffer[idx].heap = current_heap;
      buffer[idx].stack = current_stack;
      gettime (&buffer[idx]);

      /* Write out buffer if it is full.  */
      if (idx + 1 == buffer_size)
        write (fd, buffer, buffer_size * sizeof (struct entry));
      else if (idx + 1 == 2 * buffer_size)
        write (fd, &buffer[buffer_size], buffer_size * sizeof (struct entry));
    }
}


/* Interrupt handler.  */
static void
int_handler (int signo)
{
  /* Nothing gets allocated.  Just record the stack pointer position.  */
  update_data (NULL, 0, 0);
}

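/* Typical use (a sketch, assuming the usual glibc setup in which this
   file is built into libmemusage.so and driven by the `memusage'
   wrapper script):

     MEMUSAGE_OUTPUT=memusage.dat MEMUSAGE_TRACE_MMAP=1 \
       LD_PRELOAD=libmemusage.so ./myprog

   The environment variables recognized here are described in the
   comment below.  */
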
/* Find out whether this is the program we are supposed to profile.
   For this the name in the variable `__progname' must match the one
   given in the environment variable MEMUSAGE_PROG_NAME.  If the variable
   is not present, every program assumes it should be profiled.

   If this is the program, open a file descriptor to the output file.
   We will write to it whenever the buffer overflows.  The name of the
   output file is determined by the environment variable MEMUSAGE_OUTPUT.

   If the environment variable MEMUSAGE_BUFFER_SIZE is set, its numerical
   value determines the size of the internal buffer.  The number gives
   the number of elements in the buffer.  Setting it to one effectively
   selects unbuffered operation.

   If MEMUSAGE_NO_TIMER is not present, a profiling timer handler is
   installed which records the stack pointer at the highest possible
   frequency.  */
static void
me (void)
{
  const char *env = getenv ("MEMUSAGE_PROG_NAME");
  size_t prog_len = strlen (__progname);

  initialized = -1;
  mallocp = (void *(*)(size_t)) dlsym (RTLD_NEXT, "malloc");
  reallocp = (void *(*)(void *, size_t)) dlsym (RTLD_NEXT, "realloc");
  callocp = (void *(*)(size_t, size_t)) dlsym (RTLD_NEXT, "calloc");
  freep = (void (*)(void *)) dlsym (RTLD_NEXT, "free");

  mmapp = (void *(*)(void *, size_t, int, int, int, off_t)) dlsym (RTLD_NEXT,
                                                                   "mmap");
  mmap64p =
    (void *(*)(void *, size_t, int, int, int, off64_t)) dlsym (RTLD_NEXT,
                                                               "mmap64");
  mremapp = (void *(*)(void *, size_t, size_t, int, void *)) dlsym (RTLD_NEXT,
                                                                    "mremap");
  munmapp = (int (*)(void *, size_t)) dlsym (RTLD_NEXT, "munmap");
  initialized = 1;

  if (env != NULL)
    {
      /* Check for program name.  */
      size_t len = strlen (env);
      if (len > prog_len || strcmp (env, &__progname[prog_len - len]) != 0
          || (prog_len != len && __progname[prog_len - len - 1] != '/'))
        not_me = true;
    }

  /* Only open the file if it's really us.  */
  if (!not_me && fd == -1)
    {
      const char *outname;

      if (!start_sp)
        start_sp = __thread_stack_pointer ();

      outname = getenv ("MEMUSAGE_OUTPUT");
      if (outname != NULL && outname[0] != '\0'
          && (access (outname, R_OK | W_OK) == 0 || errno == ENOENT))
        {
          fd = creat64 (outname, 0666);

          if (fd == -1)
            /* Don't do anything in future calls if we cannot write to
               the output file.  */
            not_me = true;
          else
            {
              /* Write the first entry.  */
              first.heap = 0;
              first.stack = 0;
              gettime (&first);
              /* Write it two times since we need the starting and end time. */
              write (fd, &first, sizeof (first));
              write (fd, &first, sizeof (first));

              /* Determine the buffer size.  We use the default if the
                 environment variable is not present.  */
              buffer_size = DEFAULT_BUFFER_SIZE;
              const char *str_buffer_size = getenv ("MEMUSAGE_BUFFER_SIZE");
              if (str_buffer_size != NULL)
                {
                  buffer_size = atoi (str_buffer_size);
                  if (buffer_size == 0 || buffer_size > DEFAULT_BUFFER_SIZE)
                    buffer_size = DEFAULT_BUFFER_SIZE;
                }

              /* Possibly enable timer-based stack pointer retrieval.  */
              if (getenv ("MEMUSAGE_NO_TIMER") == NULL)
                {
                  struct sigaction act;

                  act.sa_handler = (sighandler_t) &int_handler;
                  act.sa_flags = SA_RESTART;
                  sigfillset (&act.sa_mask);

                  if (sigaction (SIGPROF, &act, NULL) >= 0)
                    {
                      struct itimerval timer;

                      timer.it_value.tv_sec = 0;
                      timer.it_value.tv_usec = 1;
                      timer.it_interval = timer.it_value;
                      setitimer (ITIMER_PROF, &timer, NULL);
                    }
                }
            }
        }

      if (!not_me && getenv ("MEMUSAGE_TRACE_MMAP") != NULL)
        trace_mmap = true;
    }
}

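/* Initialization happens in two places: the constructor below runs when
   the library is loaded and records the main thread's stack pointer,
   and me() is also called lazily from the interposed functions if the
   constructor has not run yet.  While me() looks up the real functions
   with dlsym, `initialized' is set to -1 so that any allocation calls
   made by dlsym itself bail out instead of recursing.  */
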
/* Record the initial stack position.  */
static void
__attribute__ ((constructor))
init (void)
{
  start_sp = __thread_stack_pointer ();
  if (!initialized)
    me ();
}


/* `malloc' replacement.  We keep track of the memory usage if this is the
   correct program.  */
void *
malloc (size_t len)
{
  struct header *result = NULL;

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return NULL;

      me ();
    }

  /* If this is not the correct program just use the normal function.  */
  if (not_me)
    return (*mallocp) (len);

  /* Keep track of number of calls.  */
  catomic_increment (&calls[idx_malloc]);
  /* Keep track of total memory consumption for `malloc'.  */
  catomic_add (&total[idx_malloc], len);
  /* Keep track of total memory requirement.  */
  catomic_add (&grand_total, len);
  /* Remember the size of the request.  */
  if (len < 65536)
    catomic_increment (&histogram[len / 16]);
  else
    catomic_increment (&large);
  /* Total number of calls of any of the functions.  */
  catomic_increment (&calls_total);

  /* Do the real work.  */
  result = (struct header *) (*mallocp) (len + sizeof (struct header));
  if (result == NULL)
    {
      catomic_increment (&failed[idx_malloc]);
      return NULL;
    }

  /* Update the allocation data and write out the records if necessary.  */
  update_data (result, len, 0);

  /* Return the pointer to the user buffer.  */
  return (void *) (result + 1);
}


/* `realloc' replacement.  We keep track of the memory usage if this is the
   correct program.  */
void *
realloc (void *old, size_t len)
{
  struct header *result = NULL;
  struct header *real;
  size_t old_len;

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return NULL;

      me ();
    }

  /* If this is not the correct program just use the normal function.  */
  if (not_me)
    return (*reallocp) (old, len);

  if (old == NULL)
    {
      /* This is really a `malloc' call.  */
      real = NULL;
      old_len = 0;
    }
  else
    {
      real = ((struct header *) old) - 1;
      if (real->magic != MAGIC)
        /* This memory was not allocated here.  */
        return (*reallocp) (old, len);

      old_len = real->length;
    }

  /* Keep track of number of calls.  */
  catomic_increment (&calls[idx_realloc]);
  if (len > old_len)
    {
      /* Keep track of total memory consumption for `realloc'.  */
      catomic_add (&total[idx_realloc], len - old_len);
      /* Keep track of total memory requirement.  */
      catomic_add (&grand_total, len - old_len);
    }

  if (len == 0 && old != NULL)
    {
      /* Special case.  */
      catomic_increment (&realloc_free);
      /* Keep track of total memory freed using `free'.  */
      catomic_add (&total[idx_free], real->length);

      /* Update the allocation data and write out the records if necessary.  */
      update_data (NULL, 0, old_len);

      /* Do the real work.  */
      (*freep) (real);

      return NULL;
    }

  /* Remember the size of the request.  */
  if (len < 65536)
    catomic_increment (&histogram[len / 16]);
  else
    catomic_increment (&large);
  /* Total number of calls of any of the functions.  */
  catomic_increment (&calls_total);

  /* Do the real work.  */
  result = (struct header *) (*reallocp) (real, len + sizeof (struct header));
  if (result == NULL)
    {
      catomic_increment (&failed[idx_realloc]);
      return NULL;
    }

  /* Record whether the reduction/increase happened in place.  */
  if (real == result)
    catomic_increment (&inplace);
  /* Was the buffer shrunk?  */
  if (old_len > len)
    catomic_increment (&decreasing);

  /* Update the allocation data and write out the records if necessary.  */
  update_data (result, len, old_len);

  /* Return the pointer to the user buffer.  */
  return (void *) (result + 1);
}


/* `calloc' replacement.  We keep track of the memory usage if this is the
   correct program.  */
void *
calloc (size_t n, size_t len)
{
  struct header *result;
  size_t size = n * len;

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return NULL;

      me ();
    }

  /* If this is not the correct program just use the normal function.  */
  if (not_me)
    return (*callocp) (n, len);

  /* Keep track of number of calls.  */
  catomic_increment (&calls[idx_calloc]);
  /* Keep track of total memory consumption for `calloc'.  */
  catomic_add (&total[idx_calloc], size);
  /* Keep track of total memory requirement.  */
  catomic_add (&grand_total, size);
  /* Remember the size of the request.  */
  if (size < 65536)
    catomic_increment (&histogram[size / 16]);
  else
    catomic_increment (&large);
  /* Total number of calls of any of the functions.  */
  ++calls_total;

  /* Do the real work.  */
  result = (struct header *) (*mallocp) (size + sizeof (struct header));
  if (result == NULL)
    {
      catomic_increment (&failed[idx_calloc]);
      return NULL;
    }

  /* Update the allocation data and write out the records if necessary.  */
  update_data (result, size, 0);

  /* Do what `calloc' would have done and return the buffer to the caller.  */
  return memset (result + 1, '\0', size);
}


/* `free' replacement.  We keep track of the memory usage if this is the
   correct program.  */
void
free (void *ptr)
{
  struct header *real;

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return;

      me ();
    }

  /* If this is not the correct program just use the normal function.  */
  if (not_me)
    {
      (*freep) (ptr);
      return;
    }

  /* `free (NULL)' has no effect.  */
  if (ptr == NULL)
    {
      catomic_increment (&calls[idx_free]);
      return;
    }

  /* Determine the pointer to the header.  */
  real = ((struct header *) ptr) - 1;
  if (real->magic != MAGIC)
    {
      /* This block wasn't allocated here.  */
      (*freep) (ptr);
      return;
    }

  /* Keep track of number of calls.  */
  catomic_increment (&calls[idx_free]);
  /* Keep track of total memory freed using `free'.  */
  catomic_add (&total[idx_free], real->length);

  /* Update the allocation data and write out the records if necessary.  */
  update_data (NULL, 0, real->length);

  /* Do the real work.  */
  (*freep) (real);
}

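/* The mmap/mmap64/mremap/munmap wrappers below only collect statistics
   when MEMUSAGE_TRACE_MMAP is set.  Mappings are classified as anonymous
   (idx_mmap_a), writable (idx_mmap_w) or read-only (idx_mmap_r); only
   writable file mappings feed into the heap curve via update_data, and
   no header is prepended because munmap receives the length as a
   parameter.  */
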
/* `mmap' replacement.  We do not have to keep track of the size since
   `munmap' will get it as a parameter.  */
void *
mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
{
  void *result = NULL;

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return NULL;

      me ();
    }

  /* Always get a block.  We don't need extra memory.  */
  result = (*mmapp) (start, len, prot, flags, fd, offset);

  if (!not_me && trace_mmap)
    {
      int idx = (flags & MAP_ANON
                 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);

      /* Keep track of number of calls.  */
      catomic_increment (&calls[idx]);
      /* Keep track of total memory consumption for `mmap'.  */
      catomic_add (&total[idx], len);
      /* Keep track of total memory requirement.  */
      catomic_add (&grand_total, len);
      /* Remember the size of the request.  */
      if (len < 65536)
        catomic_increment (&histogram[len / 16]);
      else
        catomic_increment (&large);
      /* Total number of calls of any of the functions.  */
      catomic_increment (&calls_total);

      /* Check for failures.  */
      if (result == NULL)
        catomic_increment (&failed[idx]);
      else if (idx == idx_mmap_w)
        /* Update the allocation data and write out the records if
           necessary.  Note the first parameter is NULL which means
           the size is not tracked.  */
        update_data (NULL, len, 0);
    }

  /* Return the pointer to the user buffer.  */
  return result;
}


/* `mmap64' replacement.  We do not have to keep track of the size since
   `munmap' will get it as a parameter.  */
void *
mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
{
  void *result = NULL;

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return NULL;

      me ();
    }

  /* Always get a block.  We don't need extra memory.  */
  result = (*mmap64p) (start, len, prot, flags, fd, offset);

  if (!not_me && trace_mmap)
    {
      int idx = (flags & MAP_ANON
                 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);

      /* Keep track of number of calls.  */
      catomic_increment (&calls[idx]);
      /* Keep track of total memory consumption for `mmap64'.  */
      catomic_add (&total[idx], len);
      /* Keep track of total memory requirement.  */
      catomic_add (&grand_total, len);
      /* Remember the size of the request.  */
      if (len < 65536)
        catomic_increment (&histogram[len / 16]);
      else
        catomic_increment (&large);
      /* Total number of calls of any of the functions.  */
      catomic_increment (&calls_total);

      /* Check for failures.  */
      if (result == NULL)
        catomic_increment (&failed[idx]);
      else if (idx == idx_mmap_w)
        /* Update the allocation data and write out the records if
           necessary.  Note the first parameter is NULL which means
           the size is not tracked.  */
        update_data (NULL, len, 0);
    }

  /* Return the pointer to the user buffer.  */
  return result;
}


/* `mremap' replacement.  We do not have to keep track of the size since
   `munmap' will get it as a parameter.  */
void *
mremap (void *start, size_t old_len, size_t len, int flags, ...)
{
  void *result = NULL;
  va_list ap;

  va_start (ap, flags);
  void *newaddr = (flags & MREMAP_FIXED) ? va_arg (ap, void *) : NULL;
  va_end (ap);

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return NULL;

      me ();
    }

  /* Always get a block.  We don't need extra memory.  */
  result = (*mremapp) (start, old_len, len, flags, newaddr);

  if (!not_me && trace_mmap)
    {
      /* Keep track of number of calls.  */
      catomic_increment (&calls[idx_mremap]);
      if (len > old_len)
        {
          /* Keep track of total memory consumption for `mremap'.  */
          catomic_add (&total[idx_mremap], len - old_len);
          /* Keep track of total memory requirement.  */
          catomic_add (&grand_total, len - old_len);
        }
      /* Remember the size of the request.  */
      if (len < 65536)
        catomic_increment (&histogram[len / 16]);
      else
        catomic_increment (&large);
      /* Total number of calls of any of the functions.  */
      catomic_increment (&calls_total);

      /* Check for failures.  */
      if (result == NULL)
        catomic_increment (&failed[idx_mremap]);
      else
        {
          /* Record whether the reduction/increase happened in place.  */
          if (start == result)
            catomic_increment (&inplace_mremap);
          /* Was the buffer shrunk?  */
          if (old_len > len)
            catomic_increment (&decreasing_mremap);

          /* Update the allocation data and write out the records if
             necessary.  Note the first parameter is NULL which means
             the size is not tracked.  */
          update_data (NULL, len, old_len);
        }
    }

  /* Return the pointer to the user buffer.  */
  return result;
}


/* `munmap' replacement.  */
int
munmap (void *start, size_t len)
{
  int result;

  /* Determine real implementation if not already happened.  */
  if (__glibc_unlikely (initialized <= 0))
    {
      if (initialized == -1)
        return -1;

      me ();
    }

  /* Do the real work.  */
  result = (*munmapp) (start, len);

  if (!not_me && trace_mmap)
    {
      /* Keep track of number of calls.  */
      catomic_increment (&calls[idx_munmap]);

      if (__glibc_likely (result == 0))
        {
          /* Keep track of total memory freed using `munmap'.  */
          catomic_add (&total[idx_munmap], len);

          /* Update the allocation data and write out the records if
             necessary.  */
          update_data (NULL, 0, len);
        }
      else
        catomic_increment (&failed[idx_munmap]);
    }

  return result;
}

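/* On exit the destructor below flushes whatever is left in the sample
   buffer, rewrites the two leading records of the output file with the
   measured peaks, and prints a colored summary plus a histogram of the
   request sizes to stderr.  The histogram bars are scaled so that the
   most frequent bin spans 50 characters.  */
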
/* Write some statistics to standard error.  */
static void
__attribute__ ((destructor))
dest (void)
{
  int percent, cnt;
  unsigned long int maxcalls;

  /* If we haven't done anything here, just return.  */
  if (not_me)
    return;

  /* Don't profile any memory functions called from here on.  */
  not_me = true;

  /* Finish the output file.  */
  if (fd != -1)
    {
      /* Write the partially filled buffer.  */
      if (buffer_cnt > buffer_size)
        write (fd, buffer + buffer_size,
               (buffer_cnt - buffer_size) * sizeof (struct entry));
      else
        write (fd, buffer, buffer_cnt * sizeof (struct entry));

      /* Go back to the beginning of the file.  We allocated two records
         here when we opened the file.  */
      lseek (fd, 0, SEEK_SET);
      /* Write out a record containing the total size.  */
      first.stack = peak_total;
      write (fd, &first, sizeof (struct entry));
      /* Write out another record containing the maximum for heap and
         stack.  */
      first.heap = peak_heap;
      first.stack = peak_stack;
      gettime (&first);
      write (fd, &first, sizeof (struct entry));

      /* Close the file.  */
      close (fd);
      fd = -1;
    }

  /* Write a colorful statistic.  */
  fprintf (stderr, "\n\
\e[01;32mMemory usage summary:\e[0;0m heap total: %llu, heap peak: %lu, stack peak: %lu\n\
\e[04;34m         total calls   total memory   failed calls\e[0m\n\
\e[00;34m malloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
\e[00;34mrealloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m  (nomove:%ld, dec:%ld, free:%ld)\n\
\e[00;34m calloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
\e[00;34m   free|\e[0m %10lu   %12llu\n",
           (unsigned long long int) grand_total, (unsigned long int) peak_heap,
           (unsigned long int) peak_stack,
           (unsigned long int) calls[idx_malloc],
           (unsigned long long int) total[idx_malloc],
           failed[idx_malloc] ? "\e[01;41m" : "",
           (unsigned long int) failed[idx_malloc],
           (unsigned long int) calls[idx_realloc],
           (unsigned long long int) total[idx_realloc],
           failed[idx_realloc] ? "\e[01;41m" : "",
           (unsigned long int) failed[idx_realloc],
           (unsigned long int) inplace,
           (unsigned long int) decreasing,
           (unsigned long int) realloc_free,
           (unsigned long int) calls[idx_calloc],
           (unsigned long long int) total[idx_calloc],
           failed[idx_calloc] ? "\e[01;41m" : "",
           (unsigned long int) failed[idx_calloc],
           (unsigned long int) calls[idx_free],
           (unsigned long long int) total[idx_free]);

  if (trace_mmap)
    fprintf (stderr, "\
\e[00;34mmmap(r)|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
\e[00;34mmmap(w)|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
\e[00;34mmmap(a)|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
\e[00;34m mremap|\e[0m %10lu   %12llu   %s%12lu\e[00;00m  (nomove: %ld, dec:%ld)\n\
\e[00;34m munmap|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n",
             (unsigned long int) calls[idx_mmap_r],
             (unsigned long long int) total[idx_mmap_r],
             failed[idx_mmap_r] ? "\e[01;41m" : "",
             (unsigned long int) failed[idx_mmap_r],
             (unsigned long int) calls[idx_mmap_w],
             (unsigned long long int) total[idx_mmap_w],
             failed[idx_mmap_w] ? "\e[01;41m" : "",
             (unsigned long int) failed[idx_mmap_w],
             (unsigned long int) calls[idx_mmap_a],
             (unsigned long long int) total[idx_mmap_a],
             failed[idx_mmap_a] ? "\e[01;41m" : "",
             (unsigned long int) failed[idx_mmap_a],
             (unsigned long int) calls[idx_mremap],
             (unsigned long long int) total[idx_mremap],
             failed[idx_mremap] ? "\e[01;41m" : "",
             (unsigned long int) failed[idx_mremap],
             (unsigned long int) inplace_mremap,
             (unsigned long int) decreasing_mremap,
             (unsigned long int) calls[idx_munmap],
             (unsigned long long int) total[idx_munmap],
             failed[idx_munmap] ? "\e[01;41m" : "",
             (unsigned long int) failed[idx_munmap]);

  /* Write out a histogram of the sizes of the allocations.  */
  fprintf (stderr, "\e[01;32mHistogram for block sizes:\e[0;0m\n");

  /* Determine the maximum of all calls for each size range.  */
  maxcalls = large;
  for (cnt = 0; cnt < 65536; cnt += 16)
    if (histogram[cnt / 16] > maxcalls)
      maxcalls = histogram[cnt / 16];

  for (cnt = 0; cnt < 65536; cnt += 16)
    /* Only write out the nonzero entries.  */
    if (histogram[cnt / 16] != 0)
      {
        percent = (histogram[cnt / 16] * 100) / calls_total;
        fprintf (stderr, "%5d-%-5d%12lu ", cnt, cnt + 15,
                 (unsigned long int) histogram[cnt / 16]);
        if (percent == 0)
          fputs (" <1% \e[41;37m", stderr);
        else
          fprintf (stderr, "%3d%% \e[41;37m", percent);

        /* Draw a bar with a length corresponding to the current
           percentage.  */
        percent = (histogram[cnt / 16] * 50) / maxcalls;
        while (percent-- > 0)
          fputc ('=', stderr);
        fputs ("\e[0;0m\n", stderr);
      }

  if (large != 0)
    {
      percent = (large * 100) / calls_total;
      fprintf (stderr, "   large   %12lu ", (unsigned long int) large);
      if (percent == 0)
        fputs (" <1% \e[41;37m", stderr);
      else
        fprintf (stderr, "%3d%% \e[41;37m", percent);
      percent = (large * 50) / maxcalls;
      while (percent-- > 0)
        fputc ('=', stderr);
      fputs ("\e[0;0m\n", stderr);
    }

  /* Any following malloc/free etc. calls should generate statistics again,
     because otherwise freeing something that has been malloced before
     this destructor (including struct header in front of it) wouldn't
     be properly freed.  */
  not_me = false;
}