1 //--------------------------------------------------------------------*/
2 //--- Massif: a heap profiling tool.                     ms_main.c ---*/
3 //--------------------------------------------------------------------*/
4 
5 /*
6    This file is part of Massif, a Valgrind tool for profiling memory
7    usage of programs.
8 
9    Copyright (C) 2003-2017 Nicholas Nethercote
10       njn@valgrind.org
11 
12    This program is free software; you can redistribute it and/or
13    modify it under the terms of the GNU General Public License as
14    published by the Free Software Foundation; either version 2 of the
15    License, or (at your option) any later version.
16 
17    This program is distributed in the hope that it will be useful, but
18    WITHOUT ANY WARRANTY; without even the implied warranty of
19    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20    General Public License for more details.
21 
22    You should have received a copy of the GNU General Public License
23    along with this program; if not, write to the Free Software
24    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
25    02111-1307, USA.
26 
27    The GNU General Public License is contained in the file COPYING.
28 */
29 
30 //---------------------------------------------------------------------------
31 // XXX:
32 //---------------------------------------------------------------------------
33 // Todo -- nice, but less critical:
34 // - do a graph-drawing test
35 // - make file format more generic.  Obstacles:
36 //   - unit prefixes are not generic
37 //   - preset column widths for stats are not generic
38 //   - preset column headers are not generic
39 //   - "Massif arguments:" line is not generic
40 // - do snapshots on some specific client requests
41 //     - "show me the extra allocations since the last snapshot"
42 //     - "start/stop logging" (eg. quickly skip boring bits)
43 // - Add ability to draw multiple graphs, eg. heap-only, stack-only, total.
44 //   Give each graph a title.  (try to do it generically!)
45 // - make --show-below-main=no work
46 // - Options like --alloc-fn='operator new(unsigned, std::nothrow_t const&)'
47 //   don't work in a .valgrindrc file or in $VALGRIND_OPTS.
48 //   m_commandline.c:add_args_from_string() needs to respect single quotes.
49 // - With --stack=yes, want to add a stack trace for detailed snapshots so
50 //   it's clear where/why the peak is occurring. (Mattieu Castet)  Also,
51 //   possibly useful even with --stack=no? (Andi Yin)
52 //
53 // Performance:
54 // - To run the benchmarks:
55 //
56 //     perl perf/vg_perf --tools=massif --reps=3 perf/{heap,tinycc} massif
57 //     time valgrind --tool=massif --depth=100 konqueror
58 //
59 //   The other benchmarks don't do much allocation, and so give similar speeds
60 //   to Nulgrind.
61 //
62 //   Timing results on 'nevermore' (njn's machine) as of r7013:
63 //
64 //     heap      0.53s  ma:12.4s (23.5x, -----)
65 //     tinycc    0.46s  ma: 4.9s (10.7x, -----)
66 //     many-xpts 0.08s  ma: 2.0s (25.0x, -----)
67 //     konqueror 29.6s real  0:21.0s user
68 //
69 //   [Introduction of --time-unit=i as the default slowed things down by
70 //   roughly 0--20%.]
71 //
72 // Todo -- low priority:
73 // - In each XPt, record both bytes and the number of allocations, and
74 //   possibly the global number of allocations.
75 // - (Andy Lin) Give a stack trace on detailed snapshots?
76 // - (Artur Wisz) add a feature to Massif to ignore any heap blocks larger
77 //   than a certain size!  Because: "linux's malloc allows to set a
78 //   MMAP_THRESHOLD value, so we set it to 4096 - all blocks above that will
79 //   be handled directly by the kernel, and are guaranteed to be returned to
80 //   the system when freed. So we needed to profile only blocks below this
81 //   limit."
82 //
83 // File format working notes:
84 
85 #if 0
86 desc: --heap-admin=foo
87 cmd: date
88 time_unit: ms
89 #-----------
90 snapshot=0
91 #-----------
92 time=0
93 mem_heap_B=0
94 mem_heap_admin_B=0
95 mem_stacks_B=0
96 heap_tree=empty
97 #-----------
98 snapshot=1
99 #-----------
100 time=353
101 mem_heap_B=5
102 mem_heap_admin_B=0
103 mem_stacks_B=0
104 heap_tree=detailed
105 n1: 5 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
106  n1: 5 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
107   n1: 5 0x279DE6: _nl_load_locale_from_archive (in /lib/libc-2.3.5.so)
108    n1: 5 0x278E97: _nl_find_locale (in /lib/libc-2.3.5.so)
109     n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)
110      n1: 5 0x8049821: (within /bin/date)
111       n0: 5 0x26ED5E: (below main) (in /lib/libc-2.3.5.so)
112 
113 
114 n_events: n  time(ms)  total(B)    useful-heap(B)  admin-heap(B)  stacks(B)
115 t_events: B
116 n 0 0 0 0 0
117 n 0 0 0 0 0
118 t1: 5 <string...>
119  t1: 6 <string...>
120 
121 Ideas:
122 - each snapshot specifies an x-axis value and one or more y-axis values.
123 - can display the y-axis values separately if you like
124 - can completely separate connection between snapshots and trees.
125 
126 Challenges:
127 - how to specify and scale/abbreviate units on axes?
128 - how to combine multiple values into the y-axis?
129 
130 --------------------------------------------------------------------------------
Command:            date
131 Massif arguments:   --heap-admin=foo
132 ms_print arguments: massif.out
133 --------------------------------------------------------------------------------
134     KB
135 6.472^                                                       :#
136      |                                                       :#  ::  .    .
137      ...
138      |                                     ::@  :@    :@ :@:::#  ::  :    ::::
139    0 +-----------------------------------@---@---@-----@--@---#-------------->ms
     0                                                                     713
140 
141 Number of snapshots: 50
142  Detailed snapshots: [2, 11, 13, 19, 25, 32 (peak)]
143 --------------------------------------------------------------------------------
  n       time(ms)         total(B)   useful-heap(B) admin-heap(B)    stacks(B)
144 --------------------------------------------------------------------------------
  0              0                0                0             0            0
145   1            345                5                5             0            0
146   2            353                5                5             0            0
147 100.00% (5B) (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
148 ->100.00% (5B) 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
149 #endif
150 
151 //---------------------------------------------------------------------------
152 
153 #include "pub_tool_basics.h"
154 #include "pub_tool_vki.h"
155 #include "pub_tool_aspacemgr.h"
156 #include "pub_tool_debuginfo.h"
157 #include "pub_tool_hashtable.h"
158 #include "pub_tool_libcbase.h"
159 #include "pub_tool_libcassert.h"
160 #include "pub_tool_libcfile.h"
161 #include "pub_tool_libcprint.h"
162 #include "pub_tool_libcproc.h"
163 #include "pub_tool_machine.h"
164 #include "pub_tool_mallocfree.h"
165 #include "pub_tool_options.h"
166 #include "pub_tool_poolalloc.h"
167 #include "pub_tool_replacemalloc.h"
168 #include "pub_tool_stacktrace.h"
169 #include "pub_tool_threadstate.h"
170 #include "pub_tool_tooliface.h"
171 #include "pub_tool_xarray.h"
172 #include "pub_tool_xtree.h"
173 #include "pub_tool_xtmemory.h"
174 #include "pub_tool_clientstate.h"
175 #include "pub_tool_gdbserver.h"
176 
177 #include "pub_tool_clreq.h"           // For {MALLOC,FREE}LIKE_BLOCK
178 
179 //------------------------------------------------------------*/
180 //--- Overview of operation                                ---*/
181 //------------------------------------------------------------*/
182 
183 // The size of the stacks and heap is tracked.  The heap is tracked in a lot
184 // of detail, enough to tell how many bytes each line of code is responsible
185 // for, more or less.  The main data structure is an xtree maintaining the
186 // call tree beneath all the allocation functions like malloc().
187 // (Alternatively, if --pages-as-heap=yes is specified, memory is tracked at
188 // the page level, and each page is treated much like a heap block.  We use
189 // "heap" throughout below to cover this case because the concepts are all the
190 // same.)
191 //
192 // "Snapshots" are recordings of the memory usage.  There are two basic
193 // kinds:
194 // - Normal:  these record the current time, total memory size, total heap
195 //   size, heap admin size and stack size.
196 // - Detailed: these record those things in a normal snapshot, plus a very
197 //   detailed XTree (see below) indicating how the heap is structured.
198 //
199 // Snapshots are taken every so often.  There are two storage classes of
200 // snapshots:
201 // - Temporary:  Massif does a temporary snapshot every so often.  The idea
202 //   is to always have a certain number of temporary snapshots around.  So
203 //   we take them frequently to begin with, but decreasingly often as the
204 //   program continues to run.  Also, we remove some old ones after a while.
205 //   Overall it's a kind of exponential decay thing.  Most of these are
206 //   normal snapshots, a small fraction are detailed snapshots.
207 // - Permanent:  Massif takes a permanent (detailed) snapshot in some
208 //   circumstances.  They are:
209 //   - Peak snapshot:  When the memory usage peak is reached, it takes a
210 //     snapshot.  It keeps this, unless the peak is subsequently exceeded,
211 //     in which case it will overwrite the peak snapshot.
212 //   - User-requested snapshots:  These are done in response to client
213 //     requests.  They are always kept.
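//
// A typical run looks like this (illustrative command lines; "myprog" is a
// stand-in for the program under test):
//
//     valgrind --tool=massif --time-unit=B ./myprog
//     ms_print massif.out.<pid>
//
// The first command records the snapshots in massif.out.<pid> (see
// --massif-out-file); the second renders them as the graph and tables
// sketched in the working notes near the top of this file.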
214 
215 // Used for printing things when clo_verbosity > 1.
216 #define VERB(verb, format, args...) \
217    if (UNLIKELY(VG_(clo_verbosity) > verb)) { \
218       VG_(dmsg)("Massif: " format, ##args);   \
219    }
220 
221 //------------------------------------------------------------//
222 //--- Statistics                                           ---//
223 //------------------------------------------------------------//
224 
225 // Konqueror startup, to give an idea of the numbers involved with a biggish
226 // program, with default depth:
227 //
228 //  depth=3                   depth=40
229 //  - 310,000 allocations
230 //  - 300,000 frees
231 //  -  15,000 XPts            800,000 XPts
232 //  -   1,800 top-XPts
233 
234 static UInt n_heap_allocs           = 0;
235 static UInt n_heap_reallocs         = 0;
236 static UInt n_heap_frees            = 0;
237 static UInt n_ignored_heap_allocs   = 0;
238 static UInt n_ignored_heap_frees    = 0;
239 static UInt n_ignored_heap_reallocs = 0;
240 static UInt n_stack_allocs          = 0;
241 static UInt n_stack_frees           = 0;
242 
243 static UInt n_skipped_snapshots     = 0;
244 static UInt n_real_snapshots        = 0;
245 static UInt n_detailed_snapshots    = 0;
246 static UInt n_peak_snapshots        = 0;
247 static UInt n_cullings              = 0;
248 
249 //------------------------------------------------------------//
250 //--- Globals                                              ---//
251 //------------------------------------------------------------//
252 
253 // Number of guest instructions executed so far.  Only used with
254 // --time-unit=i.
255 static Long guest_instrs_executed = 0;
256 
257 static SizeT heap_szB       = 0; // Live heap size
258 static SizeT heap_extra_szB = 0; // Live heap extra size -- slop + admin bytes
259 static SizeT stacks_szB     = 0; // Live stacks size
260 
261 // This is the total size from the current peak snapshot, or 0 if no peak
262 // snapshot has been taken yet.
263 static SizeT peak_snapshot_total_szB = 0;
264 
265 // Incremented every time memory is allocated/deallocated, by the
266 // allocated/deallocated amount;  includes heap, heap-admin and stack
267 // memory.  An alternative to milliseconds as a unit of program "time".
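// (Illustrative: with --time-unit=B, a 100-byte malloc followed by its free
// advances this "time" by 200 bytes, plus the matching admin and slop bytes.)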
268 static ULong total_allocs_deallocs_szB = 0;
269 
270 // When running with --heap=yes --pages-as-heap=no, we don't start taking
271 // snapshots until the first basic block is executed, rather than doing it in
272 // ms_post_clo_init (which is the obvious spot), for two reasons.
273 // - It lets us ignore stack events prior to that, because they're not
274 //   really proper ones and just would screw things up.
275 // - Because there's still some core initialisation to do, and so there
276 //   would be an artificial time gap between the first and second snapshots.
277 //
278 // When running with --heap=yes --pages-as-heap=yes, snapshots start much
279 // earlier due to new_mem_startup so this isn't relevant.
280 //
281 static Bool have_started_executing_code = False;
282 
283 //------------------------------------------------------------//
284 //--- Alloc fns                                            ---//
285 //------------------------------------------------------------//
286 
287 static XArray* alloc_fns;
288 static XArray* ignore_fns;
289 
290 static void init_alloc_fns(void)
291 {
292    // Create the list, and add the default elements.
293    alloc_fns = VG_(newXA)(VG_(malloc), "ms.main.iaf.1",
294                                        VG_(free), sizeof(HChar*));
295    #define DO(x)  { const HChar* s = x; VG_(addToXA)(alloc_fns, &s); }
296 
297    // Ordered roughly according to (presumed) frequency.
298    // Nb: The C++ "operator new*" ones are overloadable.  We include them
299    // always anyway, because even if they're overloaded, it would be a
300    // prodigiously stupid overloading that caused them to not allocate
301    // memory.
302    //
303    // XXX: because we don't look at the first stack entry (unless it's a
304    // custom allocation) there's not much point to having all these alloc
305    // functions here -- they should never appear anywhere (I think?) other
306    // than the top stack entry.  The only exceptions are those that in
307    // vg_replace_malloc.c are partly or fully implemented in terms of another
308    // alloc function: realloc (which uses malloc);  valloc,
309    // malloc_zone_valloc, posix_memalign and memalign_common (which use
310    // memalign).
311    //
312    DO("malloc"                                              );
313    DO("__builtin_new"                                       );
314    DO("operator new(unsigned)"                              );
315    DO("operator new(unsigned long)"                         );
316    DO("__builtin_vec_new"                                   );
317    DO("operator new[](unsigned)"                            );
318    DO("operator new[](unsigned long)"                       );
319    DO("calloc"                                              );
320    DO("realloc"                                             );
321    DO("memalign"                                            );
322    DO("posix_memalign"                                      );
323    DO("valloc"                                              );
324    DO("operator new(unsigned, std::nothrow_t const&)"       );
325    DO("operator new[](unsigned, std::nothrow_t const&)"     );
326    DO("operator new(unsigned long, std::nothrow_t const&)"  );
327    DO("operator new[](unsigned long, std::nothrow_t const&)");
328 #if defined(VGO_darwin)
329    DO("malloc_zone_malloc"                                  );
330    DO("malloc_zone_calloc"                                  );
331    DO("malloc_zone_realloc"                                 );
332    DO("malloc_zone_memalign"                                );
333    DO("malloc_zone_valloc"                                  );
334 #endif
335 }
336 
337 static void init_ignore_fns(void)
338 {
339    // Create the (empty) list.
340    ignore_fns = VG_(newXA)(VG_(malloc), "ms.main.iif.1",
341                                         VG_(free), sizeof(HChar*));
342 }
343 
344 //------------------------------------------------------------//
345 //--- Command line args                                    ---//
346 //------------------------------------------------------------//
347 
348 #define MAX_DEPTH       200
349 
350 typedef enum { TimeI, TimeMS, TimeB } TimeUnit;
351 
352 static const HChar* TimeUnit_to_string(TimeUnit time_unit)
353 {
354    switch (time_unit) {
355    case TimeI:  return "i";
356    case TimeMS: return "ms";
357    case TimeB:  return "B";
358    default:     tl_assert2(0, "TimeUnit_to_string: unrecognised TimeUnit");
359    }
360 }
361 
362 static Bool   clo_heap            = True;
363    // clo_heap_admin is deliberately a word-sized type.  At one point it was
364    // a UInt, but this caused problems on 64-bit machines when it was
365    // multiplied by a small negative number and then promoted to a
366    // word-sized type -- it ended up with a value of 4.2 billion.  Sigh.
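   // (Illustrative example of the hazard: if clo_heap_admin were a 32-bit
   // UInt holding 8, the expression -clo_heap_admin would wrap to
   // 0xFFFFFFF8 = 4294967288 before being widened, rather than -8.)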
367 static SSizeT clo_heap_admin      = 8;
368 static Bool   clo_pages_as_heap   = False;
369 static Bool   clo_stacks          = False;
370 static Int    clo_depth           = 30;
371 static double clo_threshold       = 1.0;  // percentage
372 static double clo_peak_inaccuracy = 1.0;  // percentage
373 static Int    clo_time_unit       = TimeI;
374 static Int    clo_detailed_freq   = 10;
375 static Int    clo_max_snapshots   = 100;
376 static const HChar* clo_massif_out_file = "massif.out.%p";
377 
378 static XArray* args_for_massif;
379 
380 static Bool ms_process_cmd_line_option(const HChar* arg)
381 {
382    const HChar* tmp_str;
383 
384    // Remember the arg for later use.
385    VG_(addToXA)(args_for_massif, &arg);
386 
387         if VG_BOOL_CLO(arg, "--heap",           clo_heap)   {}
388    else if VG_BINT_CLO(arg, "--heap-admin",     clo_heap_admin, 0, 1024) {}
389 
390    else if VG_BOOL_CLO(arg, "--stacks",         clo_stacks) {}
391 
392    else if VG_BOOL_CLO(arg, "--pages-as-heap",  clo_pages_as_heap) {}
393 
394    else if VG_BINT_CLO(arg, "--depth",          clo_depth, 1, MAX_DEPTH) {}
395 
396    else if VG_STR_CLO(arg, "--alloc-fn",        tmp_str) {
397       VG_(addToXA)(alloc_fns, &tmp_str);
398    }
399    else if VG_STR_CLO(arg, "--ignore-fn",       tmp_str) {
400       VG_(addToXA)(ignore_fns, &tmp_str);
401    }
402 
403    else if VG_DBL_CLO(arg, "--threshold",  clo_threshold) {
404       if (clo_threshold < 0 || clo_threshold > 100) {
405          VG_(fmsg_bad_option)(arg,
406             "--threshold must be between 0.0 and 100.0\n");
407       }
408    }
409 
410    else if VG_DBL_CLO(arg, "--peak-inaccuracy", clo_peak_inaccuracy) {}
411 
412    else if VG_XACT_CLO(arg, "--time-unit=i",    clo_time_unit, TimeI)  {}
413    else if VG_XACT_CLO(arg, "--time-unit=ms",   clo_time_unit, TimeMS) {}
414    else if VG_XACT_CLO(arg, "--time-unit=B",    clo_time_unit, TimeB)  {}
415 
416    else if VG_BINT_CLO(arg, "--detailed-freq",  clo_detailed_freq, 1, 1000000) {}
417 
418    else if VG_BINT_CLO(arg, "--max-snapshots",  clo_max_snapshots, 10, 1000) {}
419 
420    else if VG_STR_CLO(arg, "--massif-out-file", clo_massif_out_file) {}
421 
422    else
423       return VG_(replacement_malloc_process_cmd_line_option)(arg);
424 
425    return True;
426 }
427 
428 static void ms_print_usage(void)
429 {
430    VG_(printf)(
431 "    --heap=no|yes             profile heap blocks [yes]\n"
432 "    --heap-admin=<size>       average admin bytes per heap block;\n"
433 "                               ignored if --heap=no [8]\n"
434 "    --stacks=no|yes           profile stack(s) [no]\n"
435 "    --pages-as-heap=no|yes    profile memory at the page level [no]\n"
436 "    --depth=<number>          depth of contexts [30]\n"
437 "    --alloc-fn=<name>         specify <name> as an alloc function [empty]\n"
438 "    --ignore-fn=<name>        ignore heap allocations within <name> [empty]\n"
439 "    --threshold=<m.n>         significance threshold, as a percentage [1.0]\n"
440 "    --peak-inaccuracy=<m.n>   maximum peak inaccuracy, as a percentage [1.0]\n"
441 "    --time-unit=i|ms|B        time unit: instructions executed, milliseconds\n"
442 "                              or heap bytes alloc'd/dealloc'd [i]\n"
443 "    --detailed-freq=<N>       every Nth snapshot should be detailed [10]\n"
444 "    --max-snapshots=<N>       maximum number of snapshots recorded [100]\n"
445 "    --massif-out-file=<file>  output file name [massif.out.%%p]\n"
446    );
447 }
448 
449 static void ms_print_debug_usage(void)
450 {
451    VG_(printf)(
452 "    (none)\n"
453    );
454 }
455 
456 
457 //------------------------------------------------------------//
458 //--- XTrees                                               ---//
459 //------------------------------------------------------------//
460 
461 // The details of the heap are represented by a single XTree.
462 // This XTree maintains the nr of allocated bytes for each
463 // stacktrace/execontext.
464 //
465 // The root of the Xtree will be output as a top node  'alloc functions',
466 //  which represents all allocation functions, eg:
467 // - malloc/calloc/realloc/memalign/new/new[];
468 // - user-specified allocation functions (using --alloc-fn);
469 // - custom allocation (MALLOCLIKE) points
470 static XTree* heap_xt;
471 /* heap_xt contains a SizeT: the nr of allocated bytes by this execontext. */
472 static void init_szB(void* value)
473 {
474    *((SizeT*)value) = 0;
475 }
476 static void add_szB(void* to, const void* value)
477 {
478    *((SizeT*)to) += *((const SizeT*)value);
479 }
480 static void sub_szB(void* from, const void* value)
481 {
482    *((SizeT*)from) -= *((const SizeT*)value);
483 }
484 static ULong alloc_szB(const void* value)
485 {
486    return (ULong)*((const SizeT*)value);
487 }
488 
489 
490 //------------------------------------------------------------//
491 //--- XTree Operations                                     ---//
492 //------------------------------------------------------------//
493 
494 // This is the limit on the number of filtered alloc-fns that can be in a
495 // single stacktrace.
496 #define MAX_OVERESTIMATE   50
497 #define MAX_IPS            (MAX_DEPTH + MAX_OVERESTIMATE)
498 
499 // filtering out uninteresting entries:
500 // alloc-fns and entries above alloc-fns, and entries below main-or-below-main.
501 //   Eg:       alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
502 //   becomes:  a / b / main
503 // Nb: it's possible to end up with an empty trace, eg. if 'main' is marked
504 // as an alloc-fn.  This is ok.
505 static
506 void filter_IPs (Addr* ips, Int n_ips,
507                  UInt* top, UInt* n_ips_sel)
508 {
509    Int i;
510    Bool top_has_fnname;
511    const HChar *fnname;
512 
513    *top = 0;
514    *n_ips_sel = n_ips;
515 
516    // Advance *top as long as we find alloc functions
517    // PW Nov 2016 xtree work:
518 //  the old massif code was doing something really strange (buggy?):
519    //  'sliding' a bunch of functions without names by removing an
520    //  alloc function 'inside' a stacktrace e.g.
521    //    0x1 0x2 0x3 alloc func1 main
522    //  becomes   0x1 0x2 0x3 func1 main
523    for (i = *top; i < n_ips; i++) {
524       top_has_fnname = VG_(get_fnname)(ips[*top], &fnname);
525       if (top_has_fnname &&  VG_(strIsMemberXA)(alloc_fns, fnname)) {
526          VERB(4, "filtering alloc fn %s\n", fnname);
527          (*top)++;
528          (*n_ips_sel)--;
529       } else {
530          break;
531       }
532    }
533 
534    // filter the whole stacktrace if this allocation has to be ignored.
535    if (*n_ips_sel > 0
536        && top_has_fnname
537        && VG_(strIsMemberXA)(ignore_fns, fnname)) {
538       VERB(4, "ignored allocation from fn %s\n", fnname);
539       *top = n_ips;
540       *n_ips_sel = 0;
541    }
542 
543 
544    if (!VG_(clo_show_below_main) && *n_ips_sel > 0 ) {
545       Int mbm = VG_(XT_offset_main_or_below_main)(ips, n_ips);
546 
547       if (mbm < *top) {
548          // Special case: the first main (or below main) function is an
549          // alloc function.
550          *n_ips_sel = 1;
551          VERB(4, "main/below main: keeping 1 fn\n");
552       } else {
553          *n_ips_sel -= n_ips - mbm - 1;
554          VERB(4, "main/below main: filtering %d\n", n_ips - mbm - 1);
555       }
556    }
557 
558    // filter the frames if we have more than clo_depth
559    if (*n_ips_sel > clo_depth) {
560       VERB(4, "filtering IPs above clo_depth\n");
561       *n_ips_sel = clo_depth;
562    }
563 }
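
// Worked example, tracing the comment above filter_IPs (a sketch assuming
// --show-below-main=no, the default --depth=30, an empty ignore_fns list,
// and that VG_(XT_offset_main_or_below_main) reports offset 4 here, the
// 'main' frame):
//
//   ips[] = alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
//   n_ips = 7, *top = 0, *n_ips_sel = 7
//   - the alloc-fn loop advances past the two alloc-fns:
//       *top = 2, *n_ips_sel = 5
//   - 'a' is not in ignore_fns, so the trace is not discarded
//   - below-main filtering: *n_ips_sel -= 7 - 4 - 1, leaving *n_ips_sel = 3
//   - 3 <= clo_depth, so nothing more is trimmed
//   Result: ips[2..4] are selected, ie. a / b / main.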
564 
565 // Capture a stacktrace, and make an ec of it, without the first entry
566 // if exclude_first_entry is True.
567 static ExeContext* make_ec(ThreadId tid, Bool exclude_first_entry)
568 {
569    static Addr ips[MAX_IPS];
570 
571    // After this call, the IPs we want are in ips[0]..ips[n_ips-1].
572    Int n_ips = VG_(get_StackTrace)( tid, ips, clo_depth +  MAX_OVERESTIMATE,
573                                     NULL/*array to dump SP values in*/,
574                                     NULL/*array to dump FP values in*/,
575                                     0/*first_ip_delta*/ );
576    if (exclude_first_entry && n_ips > 0) {
577       const HChar *fnname;
578       VERB(4, "removing top fn %s from stacktrace\n",
579            VG_(get_fnname)(ips[0], &fnname) ? fnname : "???");
580       return VG_(make_ExeContext_from_StackTrace)(ips+1, n_ips-1);
581    } else
582       return VG_(make_ExeContext_from_StackTrace)(ips, n_ips);
583 }
584 
585 // Create (or update) in heap_xt an xec corresponding to the stacktrace of tid.
586 // req_szB is added to the xec (unless ec is fully filtered).
587 // Returns the corresponding XTree xecu.
588 // exclude_first_entry is an optimisation: if True, automatically removes
589 // the top level IP from the stacktrace. Should be set to True if it is known
590 // that this is an alloc fn (the top function presumably will be something
591 // like malloc or __builtin_new that we're sure to filter out).
592 static Xecu add_heap_xt( ThreadId tid, SizeT req_szB, Bool exclude_first_entry)
593 {
594    ExeContext *ec = make_ec(tid, exclude_first_entry);
595 
596    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
597       VG_(XTMemory_Full_alloc)(req_szB, ec);
598    return VG_(XT_add_to_ec) (heap_xt, ec, &req_szB);
599 }
600 
601 // Subtract req_szB from heap_xt at the given 'where' (xecu).
602 static void sub_heap_xt(Xecu where, SizeT req_szB, Bool exclude_first_entry)
603 {
604    tl_assert(clo_heap);
605 
606    if (0 == req_szB)
607       return;
608 
609    VG_(XT_sub_from_xecu) (heap_xt, where, &req_szB);
610    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
611       ExeContext *ec_free = make_ec(VG_(get_running_tid)(),
612                                     exclude_first_entry);
613       VG_(XTMemory_Full_free)(req_szB,
614                               VG_(XT_get_ec_from_xecu)(heap_xt, where),
615                               ec_free);
616    }
617 }
618 
619 
620 //------------------------------------------------------------//
621 //--- Snapshots                                            ---//
622 //------------------------------------------------------------//
623 
624 // Snapshots are done in a way so that we always have a reasonable number of
625 // them.  We start by taking them quickly.  Once we hit our limit, we cull
626 // some (eg. half), and start taking them more slowly.  Once we hit the
627 // limit again, we again cull and then take them even more slowly, and so
628 // on.
629 
630 #define UNUSED_SNAPSHOT_TIME  -333  // A conspicuous negative number.
631 
632 typedef
633    enum {
634       Normal = 77,
635       Peak,
636       Unused
637    }
638    SnapshotKind;
639 
640 typedef
641    struct {
642       SnapshotKind kind;
643       Time  time;
644       SizeT heap_szB;
645       SizeT heap_extra_szB;// Heap slop + admin bytes.
646       SizeT stacks_szB;
647       XTree* xt;    // Snapshot of heap_xt, if a detailed snapshot,
648    }                // otherwise NULL.
649    Snapshot;
650 
651 static UInt      next_snapshot_i = 0;  // Index of where next snapshot will go.
652 static Snapshot* snapshots;            // Array of snapshots.
653 
654 static Bool is_snapshot_in_use(Snapshot* snapshot)
655 {
656    if (Unused == snapshot->kind) {
657       // If snapshot is unused, check all the fields are unset.
658       tl_assert(snapshot->time           == UNUSED_SNAPSHOT_TIME);
659       tl_assert(snapshot->heap_extra_szB == 0);
660       tl_assert(snapshot->heap_szB       == 0);
661       tl_assert(snapshot->stacks_szB     == 0);
662       tl_assert(snapshot->xt             == NULL);
663       return False;
664    } else {
665       tl_assert(snapshot->time           != UNUSED_SNAPSHOT_TIME);
666       return True;
667    }
668 }
669 
670 static Bool is_detailed_snapshot(Snapshot* snapshot)
671 {
672    return (snapshot->xt ? True : False);
673 }
674 
675 static Bool is_uncullable_snapshot(Snapshot* snapshot)
676 {
677    return &snapshots[0] == snapshot                   // First snapshot
678        || &snapshots[next_snapshot_i-1] == snapshot   // Last snapshot
679        || snapshot->kind == Peak;                     // Peak snapshot
680 }
681 
682 static void sanity_check_snapshot(Snapshot* snapshot)
683 {
684    // Not much we can sanity check.
685    tl_assert(snapshot->xt == NULL || snapshot->kind != Unused);
686 }
687 
688 // All the used entries should look used, all the unused ones should be clear.
689 static void sanity_check_snapshots_array(void)
690 {
691    Int i;
692    for (i = 0; i < next_snapshot_i; i++) {
693       tl_assert( is_snapshot_in_use( & snapshots[i] ));
694    }
695    for (    ; i < clo_max_snapshots; i++) {
696       tl_assert(!is_snapshot_in_use( & snapshots[i] ));
697    }
698 }
699 
700 // This zeroes all the fields in the snapshot, but does not free the xt
701 // XTree if present.  It also does a sanity check unless asked not to;  we
702 // can't sanity check at startup when clearing the initial snapshots because
703 // they're full of junk.
704 static void clear_snapshot(Snapshot* snapshot, Bool do_sanity_check)
705 {
706    if (do_sanity_check) sanity_check_snapshot(snapshot);
707    snapshot->kind           = Unused;
708    snapshot->time           = UNUSED_SNAPSHOT_TIME;
709    snapshot->heap_extra_szB = 0;
710    snapshot->heap_szB       = 0;
711    snapshot->stacks_szB     = 0;
712    snapshot->xt             = NULL;
713 }
714 
715 // This zeroes all the fields in the snapshot, and frees the heap XTree xt if
716 // present.
717 static void delete_snapshot(Snapshot* snapshot)
718 {
719    // Nb: if there's an XTree, we free it after calling clear_snapshot,
720    // because clear_snapshot does a sanity check which includes checking the
721    // XTree.
722    XTree* tmp_xt = snapshot->xt;
723    clear_snapshot(snapshot, /*do_sanity_check*/True);
724    if (tmp_xt) {
725        VG_(XT_delete)(tmp_xt);
726    }
727 }
728 
729 static void VERB_snapshot(Int verbosity, const HChar* prefix, Int i)
730 {
731    Snapshot* snapshot = &snapshots[i];
732    const HChar* suffix;
733    switch (snapshot->kind) {
734    case Peak:   suffix = "p";                                            break;
735    case Normal: suffix = ( is_detailed_snapshot(snapshot) ? "d" : "." ); break;
736    case Unused: suffix = "u";                                            break;
737    default:
738       tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
739    }
740    VERB(verbosity, "%s S%s%3d (t:%lld, hp:%lu, ex:%lu, st:%lu)\n",
741       prefix, suffix, i,
742       snapshot->time,
743       snapshot->heap_szB,
744       snapshot->heap_extra_szB,
745       snapshot->stacks_szB
746    );
747 }
748 
749 // Cull half the snapshots;  we choose those that represent the smallest
750 // time-spans, because that gives us the most even distribution of snapshots
751 // over time.  (It's possible to lose interesting spikes, however.)
752 //
753 // Algorithm for N snapshots:  We find the snapshot representing the smallest
754 // timeframe, and remove it.  We repeat this until (N/2) snapshots are gone.
755 // We have to do this one snapshot at a time, rather than finding the (N/2)
756 // smallest snapshots in one hit, because when a snapshot is removed, its
757 // neighbours immediately cover greater timespans.  So it's O(N^2), but N is
758 // small, and it's not done very often.
759 //
760 // Once we're done, we return the new smallest interval between snapshots.
761 // That becomes our minimum time interval.
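//
// Illustrative sketch (made-up numbers, not from a real run): with
// --max-snapshots=100 and snapshots initially taken about every 10ms, the
// table fills near t=1000ms.  Culling then deletes ~50 snapshots -- the ones
// covering the smallest timespans -- and the smallest remaining interval
// (roughly 20ms) is returned and becomes the new minimum time between
// snapshots.  Each later cull roughly doubles that interval again, giving
// the exponential-decay spacing described in the "Snapshots" comment above.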
762 static UInt cull_snapshots(void)
763 {
764    Int  i, jp, j, jn, min_timespan_i;
765    Int  n_deleted = 0;
766    Time min_timespan;
767 
768    n_cullings++;
769 
770    // Sets j to the index of the first not-yet-removed snapshot at or after i
771    #define FIND_SNAPSHOT(i, j) \
772       for (j = i; \
773            j < clo_max_snapshots && !is_snapshot_in_use(&snapshots[j]); \
774            j++) { }
775 
776    VERB(2, "Culling...\n");
777 
778    // First we remove enough snapshots by clearing them in-place.  Once
779    // that's done, we can slide the remaining ones down.
780    for (i = 0; i < clo_max_snapshots/2; i++) {
781       // Find the snapshot representing the smallest timespan.  The timespan
782       // for snapshot N is d(N-1,N) + d(N,N+1), where d(A,B) is the time between
783       // snapshot A and B.  We don't consider the first and last snapshots for
784       // removal.
785       Snapshot* min_snapshot;
786       Int min_j;
787 
788       // Initial triple: (prev, curr, next) == (jp, j, jn)
789       // Initial min_timespan is the first one.
790       jp = 0;
791       FIND_SNAPSHOT(1,   j);
792       FIND_SNAPSHOT(j+1, jn);
793       min_timespan = 0x7fffffffffffffffLL;
794       min_j        = -1;
795       while (jn < clo_max_snapshots) {
796          Time timespan = snapshots[jn].time - snapshots[jp].time;
797          tl_assert(timespan >= 0);
798          // Nb: We never cull the peak snapshot.
799          if (Peak != snapshots[j].kind && timespan < min_timespan) {
800             min_timespan = timespan;
801             min_j        = j;
802          }
803          // Move on to next triple
804          jp = j;
805          j  = jn;
806          FIND_SNAPSHOT(jn+1, jn);
807       }
808       // We've found the least important snapshot, now delete it.  First
809       // print it if necessary.
810       tl_assert(-1 != min_j);    // Check we found a minimum.
811       min_snapshot = & snapshots[ min_j ];
812       if (VG_(clo_verbosity) > 1) {
813          HChar buf[64];   // large enough
814          VG_(snprintf)(buf, 64, " %3d (t-span = %lld)", i, min_timespan);
815          VERB_snapshot(2, buf, min_j);
816       }
817       delete_snapshot(min_snapshot);
818       n_deleted++;
819    }
820 
821    // Slide down the remaining snapshots over the removed ones.  First set i
822    // to point to the first empty slot, and j to the first full slot after
823    // i.  Then slide everything down.
824    for (i = 0;  is_snapshot_in_use( &snapshots[i] ); i++) { }
825    for (j = i; !is_snapshot_in_use( &snapshots[j] ); j++) { }
826    for (  ; j < clo_max_snapshots; j++) {
827       if (is_snapshot_in_use( &snapshots[j] )) {
828          snapshots[i++] = snapshots[j];
829          clear_snapshot(&snapshots[j], /*do_sanity_check*/True);
830       }
831    }
832    next_snapshot_i = i;
833 
834    // Check snapshots array looks ok after changes.
835    sanity_check_snapshots_array();
836 
837    // Find the minimum timespan remaining;  that will be our new minimum
838    // time interval.  Note that above we were finding timespans by measuring
839    // two intervals around a snapshot that was under consideration for
840    // deletion.  Here we only measure single intervals because all the
841    // deletions have occurred.
842    //
843    // But we have to be careful -- some snapshots (eg. snapshot 0, and the
844    // peak snapshot) are uncullable.  If two uncullable snapshots end up
845    // next to each other, they'll never be culled (assuming the peak doesn't
846    // change), and the time gap between them will not change.  However, the
847    // time between the remaining cullable snapshots will grow ever larger.
848    // This means that the min_timespan found will always be that between the
849    // two uncullable snapshots, and it will be much smaller than it should
850    // be.  To avoid this problem, when computing the minimum timespan, we
851    // ignore any timespans between two uncullable snapshots.
852    tl_assert(next_snapshot_i > 1);
853    min_timespan = 0x7fffffffffffffffLL;
854    min_timespan_i = -1;
855    for (i = 1; i < next_snapshot_i; i++) {
856       if (is_uncullable_snapshot(&snapshots[i]) &&
857           is_uncullable_snapshot(&snapshots[i-1]))
858       {
859          VERB(2, "(Ignoring interval %d--%d when computing minimum)\n", i-1, i);
860       } else {
861          Time timespan = snapshots[i].time - snapshots[i-1].time;
862          tl_assert(timespan >= 0);
863          if (timespan < min_timespan) {
864             min_timespan = timespan;
865             min_timespan_i = i;
866          }
867       }
868    }
869    tl_assert(-1 != min_timespan_i);    // Check we found a minimum.
870 
871    // Print remaining snapshots, if necessary.
872    if (VG_(clo_verbosity) > 1) {
873       VERB(2, "Finished culling (%3d of %3d deleted)\n",
874          n_deleted, clo_max_snapshots);
875       for (i = 0; i < next_snapshot_i; i++) {
876          VERB_snapshot(2, "  post-cull", i);
877       }
878       VERB(2, "New time interval = %lld (between snapshots %d and %d)\n",
879          min_timespan, min_timespan_i-1, min_timespan_i);
880    }
881 
882    return min_timespan;
883 }
884 
885 static Time get_time(void)
886 {
887    // Get current time, in whatever time unit we're using.
888    if (clo_time_unit == TimeI) {
889       return guest_instrs_executed;
890    } else if (clo_time_unit == TimeMS) {
891       // Some stuff happens between the millisecond timer being initialised
892       // to zero and us taking our first snapshot.  We determine that time
893       // gap so we can subtract it from all subsequent times so that our
894       // first snapshot is considered to be at t = 0ms.  Unfortunately, a
895       // bunch of symbols get read after the first snapshot is taken but
896       // before the second one (which is triggered by the first allocation),
897       // so when the time-unit is 'ms' we always have a big gap between the
898       // first two snapshots.  But at least users won't have to wonder why
899       // the first snapshot isn't at t=0.
900       static Bool is_first_get_time = True;
901       static Time start_time_ms;
902       if (is_first_get_time) {
903          start_time_ms = VG_(read_millisecond_timer)();
904          is_first_get_time = False;
905          return 0;
906       } else {
907          return VG_(read_millisecond_timer)() - start_time_ms;
908       }
909    } else if (clo_time_unit == TimeB) {
910       return total_allocs_deallocs_szB;
911    } else {
912       tl_assert2(0, "bad --time-unit value");
913    }
914 }
915 
916 // Take a snapshot, and only that -- decisions on whether to take a
917 // snapshot, or what kind of snapshot, are made elsewhere.
918 // Nb: we call the arg "my_time" because "time" shadows a global declaration
919 // in /usr/include/time.h on Darwin.
920 static void
921 take_snapshot(Snapshot* snapshot, SnapshotKind kind, Time my_time,
922               Bool is_detailed)
923 {
924    tl_assert(!is_snapshot_in_use(snapshot));
925    if (!clo_pages_as_heap) {
926       tl_assert(have_started_executing_code);
927    }
928 
929    // Heap and heap admin.
930    if (clo_heap) {
931       snapshot->heap_szB = heap_szB;
932       if (is_detailed) {
933          snapshot->xt = VG_(XT_snapshot)(heap_xt);
934       }
935       snapshot->heap_extra_szB = heap_extra_szB;
936    }
937 
938    // Stack(s).
939    if (clo_stacks) {
940       snapshot->stacks_szB = stacks_szB;
941    }
942 
943    // Rest of snapshot.
944    snapshot->kind = kind;
945    snapshot->time = my_time;
946    sanity_check_snapshot(snapshot);
947 
948    // Update stats.
949    if (Peak == kind) n_peak_snapshots++;
950    if (is_detailed)  n_detailed_snapshots++;
951    n_real_snapshots++;
952 }
953 
954 
955 // Take a snapshot, if it's time, or if we've hit a peak.
956 static void
957 maybe_take_snapshot(SnapshotKind kind, const HChar* what)
958 {
959    // 'min_time_interval' is the minimum time interval between snapshots.
960    // If we try to take a snapshot and less than this much time has passed,
961    // we don't take it.  It gets larger as the program runs longer.  It's
962    // initialised to zero so that we begin by taking snapshots as quickly as
963    // possible.
964    static Time min_time_interval = 0;
965    // Zero allows startup snapshot.
966    static Time earliest_possible_time_of_next_snapshot = 0;
967    static Int  n_snapshots_since_last_detailed         = 0;
968    static Int  n_skipped_snapshots_since_last_snapshot = 0;
969 
970    Snapshot* snapshot;
971    Bool      is_detailed;
972    // Nb: we call this variable "my_time" because "time" shadows a global
973    // declaration in /usr/include/time.h on Darwin.
974    Time      my_time = get_time();
975 
976    switch (kind) {
977     case Normal:
978       // Only do a snapshot if it's time.
979       if (my_time < earliest_possible_time_of_next_snapshot) {
980          n_skipped_snapshots++;
981          n_skipped_snapshots_since_last_snapshot++;
982          return;
983       }
984       is_detailed = (clo_detailed_freq-1 == n_snapshots_since_last_detailed);
985       break;
986 
987     case Peak: {
988       // Because we're about to do a deallocation, we're coming down from a
989       // local peak.  If it is (a) actually a global peak, and (b) a certain
990       // amount bigger than the previous peak, then we take a peak snapshot.
991       // By not taking a snapshot for every peak, we save a lot of effort --
992       // because many peaks remain peak only for a short time.
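      // Illustrative numbers: with the default --peak-inaccuracy=1.0 and a
      // recorded peak of 100,000,000 bytes, a new peak snapshot is taken
      // only once the current total exceeds 101,000,000 bytes.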
993       SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
994       SizeT excess_szB_for_new_peak =
995          (SizeT)((peak_snapshot_total_szB * clo_peak_inaccuracy) / 100);
996       if (total_szB <= peak_snapshot_total_szB + excess_szB_for_new_peak) {
997          return;
998       }
999       is_detailed = True;
1000       break;
1001     }
1002 
1003     default:
1004       tl_assert2(0, "maybe_take_snapshot: unrecognised snapshot kind");
1005    }
1006 
1007    // Take the snapshot.
1008    snapshot = & snapshots[next_snapshot_i];
1009    take_snapshot(snapshot, kind, my_time, is_detailed);
1010 
1011    // Record if it was detailed.
1012    if (is_detailed) {
1013       n_snapshots_since_last_detailed = 0;
1014    } else {
1015       n_snapshots_since_last_detailed++;
1016    }
1017 
1018    // Update peak data, if it's a Peak snapshot.
1019    if (Peak == kind) {
1020       Int i, number_of_peaks_snapshots_found = 0;
1021 
1022       // Sanity check the size, then update our recorded peak.
1023       SizeT snapshot_total_szB =
1024          snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
1025       tl_assert2(snapshot_total_szB > peak_snapshot_total_szB,
1026          "%ld, %ld\n", snapshot_total_szB, peak_snapshot_total_szB);
1027       peak_snapshot_total_szB = snapshot_total_szB;
1028 
1029       // Find the old peak snapshot, if it exists, and mark it as normal.
1030       for (i = 0; i < next_snapshot_i; i++) {
1031          if (Peak == snapshots[i].kind) {
1032             snapshots[i].kind = Normal;
1033             number_of_peaks_snapshots_found++;
1034          }
1035       }
1036       tl_assert(number_of_peaks_snapshots_found <= 1);
1037    }
1038 
1039    // Finish up verbosity and stats stuff.
1040    if (n_skipped_snapshots_since_last_snapshot > 0) {
1041       VERB(2, "  (skipped %d snapshot%s)\n",
1042          n_skipped_snapshots_since_last_snapshot,
1043          ( 1 == n_skipped_snapshots_since_last_snapshot ? "" : "s") );
1044    }
1045    VERB_snapshot(2, what, next_snapshot_i);
1046    n_skipped_snapshots_since_last_snapshot = 0;
1047 
1048    // Cull the entries, if our snapshot table is full.
1049    next_snapshot_i++;
1050    if (clo_max_snapshots == next_snapshot_i) {
1051       min_time_interval = cull_snapshots();
1052    }
1053 
1054    // Work out the earliest time when the next snapshot can happen.
1055    earliest_possible_time_of_next_snapshot = my_time + min_time_interval;
1056 }
1057 
1058 
1059 //------------------------------------------------------------//
1060 //--- Sanity checking                                      ---//
1061 //------------------------------------------------------------//
1062 
1063 static Bool ms_cheap_sanity_check ( void )
1064 {
1065    return True;   // Nothing useful we can cheaply check.
1066 }
1067 
1068 static Bool ms_expensive_sanity_check ( void )
1069 {
1070    tl_assert(heap_xt);
1071    sanity_check_snapshots_array();
1072    return True;
1073 }
1074 
1075 
1076 //------------------------------------------------------------//
1077 //--- Heap management                                      ---//
1078 //------------------------------------------------------------//
1079 
1080 // Metadata for heap blocks.  Each one contains an Xecu,
1081 // which identifies the XTree ec at which it was allocated.  Via the
1082 // HP_Chunk, the XTree ec's 'space' field is incremented (at allocation) and
1083 // decremented (at deallocation).
1084 //
1085 // Nb: first two fields must match core's VgHashNode.
1086 typedef
1087    struct _HP_Chunk {
1088       struct _HP_Chunk* next;
1089       Addr              data;       // Ptr to actual block
1090       SizeT             req_szB;    // Size requested
1091       SizeT             slop_szB;   // Extra bytes given above those requested
1092       Xecu              where;      // Where allocated; XTree xecu from heap_xt
1093    }
1094    HP_Chunk;
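
// (Illustrative: a 25-byte request that the allocator rounds up to a 32-byte
// usable block is recorded as req_szB = 25, slop_szB = 7.)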
1095 
1096 /* Pool allocator for HP_Chunk. */
1097 static PoolAlloc *HP_chunk_poolalloc = NULL;
1098 
1099 static VgHashTable *malloc_list  = NULL;   // HP_Chunks
1100 
1101 static void update_alloc_stats(SSizeT szB_delta)
1102 {
1103    // Update total_allocs_deallocs_szB.
1104    if (szB_delta < 0) szB_delta = -szB_delta;
1105    total_allocs_deallocs_szB += szB_delta;
1106 }
1107 
1108 static void update_heap_stats(SSizeT heap_szB_delta, Int heap_extra_szB_delta)
1109 {
1110    if (heap_szB_delta < 0)
1111       tl_assert(heap_szB >= -heap_szB_delta);
1112    if (heap_extra_szB_delta < 0)
1113       tl_assert(heap_extra_szB >= -heap_extra_szB_delta);
1114 
1115    heap_extra_szB += heap_extra_szB_delta;
1116    heap_szB       += heap_szB_delta;
1117 
1118    update_alloc_stats(heap_szB_delta + heap_extra_szB_delta);
1119 }
1120 
1121 static
1122 void* record_block( ThreadId tid, void* p, SizeT req_szB, SizeT slop_szB,
1123                     Bool exclude_first_entry, Bool maybe_snapshot )
1124 {
1125    // Make new HP_Chunk node, add to malloc_list
1126    HP_Chunk* hc = VG_(allocEltPA)(HP_chunk_poolalloc);
1127    hc->req_szB  = req_szB;
1128    hc->slop_szB = slop_szB;
1129    hc->data     = (Addr)p;
1130    hc->where    = 0;
1131    VG_(HT_add_node)(malloc_list, hc);
1132 
1133    if (clo_heap) {
1134       VERB(3, "<<< record_block (%lu, %lu)\n", req_szB, slop_szB);
1135 
1136       hc->where = add_heap_xt( tid, req_szB, exclude_first_entry);
1137 
1138       if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1139          // Update statistics.
1140          n_heap_allocs++;
1141 
1142          // Update heap stats.
1143          update_heap_stats(req_szB, clo_heap_admin + slop_szB);
1144 
1145          // Maybe take a snapshot.
1146          if (maybe_snapshot) {
1147             maybe_take_snapshot(Normal, "  alloc");
1148          }
1149 
1150       } else {
1151          // Ignored allocation.
1152          n_ignored_heap_allocs++;
1153 
1154          VERB(3, "(ignored)\n");
1155       }
1156 
1157       VERB(3, ">>>\n");
1158    }
1159 
1160    return p;
1161 }
1162 
1163 static __inline__
1164 void* alloc_and_record_block ( ThreadId tid, SizeT req_szB, SizeT req_alignB,
1165                                Bool is_zeroed )
1166 {
1167    SizeT actual_szB, slop_szB;
1168    void* p;
1169 
1170    if ((SSizeT)req_szB < 0) return NULL;
1171 
1172    // Allocate and zero if necessary.
1173    p = VG_(cli_malloc)( req_alignB, req_szB );
1174    if (!p) {
1175       return NULL;
1176    }
1177    if (is_zeroed) VG_(memset)(p, 0, req_szB);
1178    actual_szB = VG_(cli_malloc_usable_size)(p);
1179    tl_assert(actual_szB >= req_szB);
1180    slop_szB = actual_szB - req_szB;
1181 
1182    // Record block.
1183    record_block(tid, p, req_szB, slop_szB, /*exclude_first_entry*/True,
1184                 /*maybe_snapshot*/True);
1185 
1186    return p;
1187 }
1188 
1189 static __inline__
1190 void unrecord_block ( void* p, Bool maybe_snapshot, Bool exclude_first_entry )
1191 {
1192    // Remove HP_Chunk from malloc_list
1193    HP_Chunk* hc = VG_(HT_remove)(malloc_list, (UWord)p);
1194    if (NULL == hc) {
1195       return;   // must have been a bogus free()
1196    }
1197 
1198    if (clo_heap) {
1199       VERB(3, "<<< unrecord_block\n");
1200 
1201       if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1202          // Update statistics.
1203          n_heap_frees++;
1204 
1205          // Maybe take a peak snapshot, since it's a deallocation.
1206          if (maybe_snapshot) {
1207             maybe_take_snapshot(Peak, "de-PEAK");
1208          }
1209 
1210          // Update heap stats.
1211          update_heap_stats(-hc->req_szB, -clo_heap_admin - hc->slop_szB);
1212 
1213          // Update XTree.
1214          sub_heap_xt(hc->where, hc->req_szB, exclude_first_entry);
1215 
1216          // Maybe take a snapshot.
1217          if (maybe_snapshot) {
1218             maybe_take_snapshot(Normal, "dealloc");
1219          }
1220 
1221       } else {
1222          n_ignored_heap_frees++;
1223 
1224          VERB(3, "(ignored)\n");
1225       }
1226 
1227       VERB(3, ">>> (-%lu, -%lu)\n", hc->req_szB, hc->slop_szB);
1228    }
1229 
1230    // Actually free the chunk, and the heap block (if necessary)
1231    VG_(freeEltPA) (HP_chunk_poolalloc, hc);  hc = NULL;
1232 }
1233 
1234 // Nb: --ignore-fn is tricky for realloc.  If the block's original alloc was
1235 // ignored, but the realloc is not requested to be ignored, and we are
1236 // shrinking the block, then we have to ignore the realloc -- otherwise we
1237 // could end up with negative heap sizes.  This isn't a danger if we are
1238 // growing such a block, but for consistency (it also simplifies things) we
1239 // ignore such reallocs as well.
1240 // PW Nov 2016 xtree work: why can't we just consider a realloc of an
1241 // ignored alloc to be a new alloc (i.e. not remove the old size from the
1242 // stats)?  Then everything would be fine, and a non-ignored realloc would be
1243 // counted properly.
1244 static __inline__
1245 void* realloc_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
1246 {
1247    HP_Chunk* hc;
1248    void*     p_new;
1249    SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
1250    Xecu      old_where;
1251    Bool      is_ignored = False;
1252 
1253    // Remove the old block
1254    hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
1255    if (hc == NULL) {
1256       return NULL;   // must have been a bogus realloc()
1257    }
1258 
1259    old_req_szB  = hc->req_szB;
1260    old_slop_szB = hc->slop_szB;
1261 
1262    tl_assert(!clo_pages_as_heap);  // Shouldn't be here if --pages-as-heap=yes.
1263    if (clo_heap) {
1264       VERB(3, "<<< realloc_block (%lu)\n", new_req_szB);
1265 
1266       if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1267          // Update statistics.
1268          n_heap_reallocs++;
1269 
1270          // Maybe take a peak snapshot, if it's (effectively) a deallocation.
1271          if (new_req_szB < old_req_szB) {
1272             maybe_take_snapshot(Peak, "re-PEAK");
1273          }
1274       } else {
1275          // The original malloc was ignored, so we have to ignore the
1276          // realloc as well.
1277          is_ignored = True;
1278       }
1279    }
1280 
1281    // Actually do the allocation, if necessary.
1282    if (new_req_szB <= old_req_szB + old_slop_szB) {
1283       // New size is smaller or same;  block not moved.
1284       p_new = p_old;
1285       new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);
1286 
1287    } else {
1288       // New size is bigger;  make new block, copy shared contents, free old.
1289       p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
1290       if (!p_new) {
1291          // Nb: if realloc fails, NULL is returned but the old block is not
1292          // touched.  What an awful function.
1293          return NULL;
1294       }
1295       VG_(memcpy)(p_new, p_old, old_req_szB + old_slop_szB);
1296       VG_(cli_free)(p_old);
1297       new_actual_szB = VG_(cli_malloc_usable_size)(p_new);
1298       tl_assert(new_actual_szB >= new_req_szB);
1299       new_slop_szB = new_actual_szB - new_req_szB;
1300    }
1301 
1302    if (p_new) {
1303       // Update HP_Chunk.
1304       hc->data     = (Addr)p_new;
1305       hc->req_szB  = new_req_szB;
1306       hc->slop_szB = new_slop_szB;
1307       old_where    = hc->where;
1308       hc->where    = 0;
1309 
1310       // Update XTree.
1311       if (clo_heap) {
1312          hc->where = add_heap_xt( tid, new_req_szB,
1313                                   /*exclude_first_entry*/True);
1314          if (!is_ignored && VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1315             sub_heap_xt(old_where, old_req_szB, /*exclude_first_entry*/True);
1316          } else {
1317             // The realloc itself is ignored.
1318             is_ignored = True;
1319 
1320             /* XTREE??? hack to have something compatible with pre
1321                m_xtree massif: if the previous alloc/realloc was
1322                ignored, and this one is not ignored, then keep the
1323                previous where, to continue marking this memory as
1324                ignored. */
1325             if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0
1326                 && VG_(XT_n_ips_sel)(heap_xt, old_where) == 0)
1327                hc->where = old_where;
1328 
1329             // Update statistics.
1330             n_ignored_heap_reallocs++;
1331          }
1332       }
1333    }
1334 
1335    // Now insert the new hc (with a possibly new 'data' field) into
1336    // malloc_list.  If this realloc() did not increase the memory size, we
1337    // will have removed and then re-added hc unnecessarily.  But that's ok
1338    // because shrinking a block with realloc() is (presumably) much rarer
1339    // than growing it, and this way simplifies the growing case.
1340    VG_(HT_add_node)(malloc_list, hc);
1341 
1342    if (clo_heap) {
1343       if (!is_ignored) {
1344          // Update heap stats.
1345          update_heap_stats(new_req_szB - old_req_szB,
1346                            new_slop_szB - old_slop_szB);
1347 
1348          // Maybe take a snapshot.
1349          maybe_take_snapshot(Normal, "realloc");
1350       } else {
1351 
1352          VERB(3, "(ignored)\n");
1353       }
1354 
1355       VERB(3, ">>> (%ld, %ld)\n",
1356            (SSizeT)(new_req_szB - old_req_szB),
1357            (SSizeT)(new_slop_szB - old_slop_szB));
1358    }
1359 
1360    return p_new;
1361 }
1362 
1363 
1364 //------------------------------------------------------------//
1365 //--- malloc() et al replacement wrappers                  ---//
1366 //------------------------------------------------------------//
1367 
1368 static void* ms_malloc ( ThreadId tid, SizeT szB )
1369 {
1370    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
1371 }
1372 
1373 static void* ms___builtin_new ( ThreadId tid, SizeT szB )
1374 {
1375    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
1376 }
1377 
1378 static void* ms___builtin_vec_new ( ThreadId tid, SizeT szB )
1379 {
1380    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
1381 }
1382 
1383 static void* ms_calloc ( ThreadId tid, SizeT m, SizeT szB )
1384 {
1385    return alloc_and_record_block( tid, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
1386 }
1387 
1388 static void *ms_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
1389 {
1390    return alloc_and_record_block( tid, szB, alignB, False );
1391 }
1392 
1393 static void ms_free ( ThreadId tid __attribute__((unused)), void* p )
1394 {
1395    unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/True);
1396    VG_(cli_free)(p);
1397 }
1398 
1399 static void ms___builtin_delete ( ThreadId tid, void* p )
1400 {
1401    unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/True);
1402    VG_(cli_free)(p);
1403 }
1404 
1405 static void ms___builtin_vec_delete ( ThreadId tid, void* p )
1406 {
1407    unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/True);
1408    VG_(cli_free)(p);
1409 }
1410 
1411 static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_szB )
1412 {
1413    return realloc_block(tid, p_old, new_szB);
1414 }
1415 
1416 static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
1417 {
1418    HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );
1419 
1420    return ( hc ? hc->req_szB + hc->slop_szB : 0 );
1421 }
1422 
1423 //------------------------------------------------------------//
1424 //--- Page handling                                        ---//
1425 //------------------------------------------------------------//
1426 
1427 static
1428 void ms_record_page_mem ( Addr a, SizeT len )
1429 {
1430    ThreadId tid = VG_(get_running_tid)();
1431    Addr end;
1432    tl_assert(VG_IS_PAGE_ALIGNED(len));
1433    tl_assert(len >= VKI_PAGE_SIZE);
1434    // Record the first N-1 pages as blocks, but don't do any snapshots.
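   // Illustrative example (not from the original source): for a 3-page
   // region starting at A, the loop below records pages A and A+PAGE without
   // a snapshot, and A+2*PAGE is recorded afterwards with maybe_snapshot
   // enabled.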
1435    for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
1436       record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
1437                     /*exclude_first_entry*/False, /*maybe_snapshot*/False );
1438    }
1439    // Record the last page as a block, and maybe do a snapshot afterwards.
1440    record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
1441                  /*exclude_first_entry*/False, /*maybe_snapshot*/True );
1442 }
1443 
1444 static
1445 void ms_unrecord_page_mem( Addr a, SizeT len )
1446 {
1447    Addr end;
1448    tl_assert(VG_IS_PAGE_ALIGNED(len));
1449    tl_assert(len >= VKI_PAGE_SIZE);
1450    // Unrecord the first page. This might be the peak, so do a snapshot.
1451    unrecord_block((void*)a, /*maybe_snapshot*/True,
1452                   /*exclude_first_entry*/False);
1453    a += VKI_PAGE_SIZE;
1454    // Then unrecord the remaining pages, but without snapshots.
1455    for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
1456       unrecord_block((void*)a, /*maybe_snapshot*/False,
1457                      /*exclude_first_entry*/False);
1458    }
1459 }
1460 
1461 //------------------------------------------------------------//
1462 
1463 static
1464 void ms_new_mem_mmap ( Addr a, SizeT len,
1465                        Bool rr, Bool ww, Bool xx, ULong di_handle )
1466 {
1467    tl_assert(VG_IS_PAGE_ALIGNED(len));
1468    ms_record_page_mem(a, len);
1469 }
1470 
1471 static
1472 void ms_new_mem_startup( Addr a, SizeT len,
1473                          Bool rr, Bool ww, Bool xx, ULong di_handle )
1474 {
1475    // startup maps are always page-sized, except the trampoline page is
1476    // marked by the core as only being the size of the trampoline itself,
1477    // which is something like 57 bytes.  Round it up to page size.
1478    len = VG_PGROUNDUP(len);
1479    ms_record_page_mem(a, len);
1480 }
1481 
1482 static
1483 void ms_new_mem_brk ( Addr a, SizeT len, ThreadId tid )
1484 {
1485    // brk limit is not necessarily aligned on a page boundary.
1486    // If brk-ing the new memory requires allocating one or more new pages,
1487    // call ms_record_page_mem with page-aligned parameters; otherwise just
1488    // ignore it.
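   // Worked example (illustrative, assuming 4KB pages): brk-ing 0x2000 bytes
   // from an old brk of 0x5000 gives old_bottom_page 0x4000 and new_top_page
   // 0x6000, so 0x2000 bytes (pages 0x5000 and 0x6000) are recorded; brk-ing
   // 0x200 bytes from 0x5100 stays within the already-recorded page 0x5000,
   // so nothing is recorded.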
1489    Addr old_bottom_page = VG_PGROUNDDN(a - 1);
1490    Addr new_top_page = VG_PGROUNDDN(a + len - 1);
1491    if (old_bottom_page != new_top_page)
1492       ms_record_page_mem(VG_PGROUNDDN(a),
1493                          (new_top_page - old_bottom_page));
1494 }
1495 
1496 static
1497 void ms_copy_mem_remap( Addr from, Addr to, SizeT len)
1498 {
1499    tl_assert(VG_IS_PAGE_ALIGNED(len));
1500    ms_unrecord_page_mem(from, len);
1501    ms_record_page_mem(to, len);
1502 }
1503 
1504 static
1505 void ms_die_mem_munmap( Addr a, SizeT len )
1506 {
1507    tl_assert(VG_IS_PAGE_ALIGNED(len));
1508    ms_unrecord_page_mem(a, len);
1509 }
1510 
1511 static
1512 void ms_die_mem_brk( Addr a, SizeT len )
1513 {
1514    // Call ms_unrecord_page_mem only if one or more pages are de-allocated.
1515    // See ms_new_mem_brk for more details.
1516    Addr new_bottom_page = VG_PGROUNDDN(a - 1);
1517    Addr old_top_page = VG_PGROUNDDN(a + len - 1);
1518    if (old_top_page != new_bottom_page)
1519       ms_unrecord_page_mem(VG_PGROUNDDN(a),
1520                            (old_top_page - new_bottom_page));
1521 
1522 }
1523 
1524 //------------------------------------------------------------//
1525 //--- Stacks                                               ---//
1526 //------------------------------------------------------------//
1527 
1528 // We really want the inlining to occur...
1529 #define INLINE    inline __attribute__((always_inline))
1530 
1531 static void update_stack_stats(SSizeT stack_szB_delta)
1532 {
1533    if (stack_szB_delta < 0) tl_assert(stacks_szB >= -stack_szB_delta);
1534    stacks_szB += stack_szB_delta;
1535 
1536    update_alloc_stats(stack_szB_delta);
1537 }
1538 
1539 static INLINE void new_mem_stack_2(SizeT len, const HChar* what)
1540 {
1541    if (have_started_executing_code) {
1542       VERB(3, "<<< new_mem_stack (%lu)\n", len);
1543       n_stack_allocs++;
1544       update_stack_stats(len);
1545       maybe_take_snapshot(Normal, what);
1546       VERB(3, ">>>\n");
1547    }
1548 }
1549 
1550 static INLINE void die_mem_stack_2(SizeT len, const HChar* what)
1551 {
1552    if (have_started_executing_code) {
1553       VERB(3, "<<< die_mem_stack (-%lu)\n", len);
1554       n_stack_frees++;
1555       maybe_take_snapshot(Peak,   "stkPEAK");
1556       update_stack_stats(-len);
1557       maybe_take_snapshot(Normal, what);
1558       VERB(3, ">>>\n");
1559    }
1560 }
1561 
1562 static void new_mem_stack(Addr a, SizeT len)
1563 {
1564    new_mem_stack_2(len, "stk-new");
1565 }
1566 
1567 static void die_mem_stack(Addr a, SizeT len)
1568 {
1569    die_mem_stack_2(len, "stk-die");
1570 }
1571 
1572 static void new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
1573 {
1574    new_mem_stack_2(len, "sig-new");
1575 }
1576 
1577 static void die_mem_stack_signal(Addr a, SizeT len)
1578 {
1579    die_mem_stack_2(len, "sig-die");
1580 }
1581 
1582 
1583 //------------------------------------------------------------//
1584 //--- Client Requests                                      ---//
1585 //------------------------------------------------------------//
1586 
1587 static void print_monitor_help ( void )
1588 {
1589    VG_(gdb_printf) (
1590 "\n"
1591 "massif monitor commands:\n"
1592 "  snapshot [<filename>]\n"
1593 "  detailed_snapshot [<filename>]\n"
1594 "      takes a snapshot (or a detailed snapshot)\n"
1595 "      and saves it in <filename>\n"
1596 "             default <filename> is massif.vgdb.out\n"
1597 "  all_snapshots [<filename>]\n"
1598 "      saves all snapshot(s) taken so far in <filename>\n"
1599 "             default <filename> is massif.vgdb.out\n"
1600 "  xtmemory [<filename>]\n"
1601 "        dump xtree memory profile in <filename> (default xtmemory.kcg)\n"
1602 "\n");
1603 }
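
// Usage sketch (illustrative, not from the original source): the commands
// above are typically issued from gdb through vgdb, e.g.
//    (gdb) monitor snapshot
//    (gdb) monitor detailed_snapshot massif.vgdb.out
// or from a shell with "vgdb detailed_snapshot" while the program runs
// under valgrind --tool=massif --vgdb=yes.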
1604 
1605 
1606 /* Forward declaration.
1607    Returns True if the request is recognised, False otherwise. */
1608 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req);
1609 static Bool ms_handle_client_request ( ThreadId tid, UWord* argv, UWord* ret )
1610 {
1611    switch (argv[0]) {
1612    case VG_USERREQ__MALLOCLIKE_BLOCK: {
1613       void* p   = (void*)argv[1];
1614       SizeT szB =        argv[2];
1615       record_block( tid, p, szB, /*slop_szB*/0, /*exclude_first_entry*/False,
1616                     /*maybe_snapshot*/True );
1617       *ret = 0;
1618       return True;
1619    }
1620    case VG_USERREQ__RESIZEINPLACE_BLOCK: {
1621       void* p        = (void*)argv[1];
1622       SizeT newSizeB =       argv[3];
1623 
1624       unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/False);
1625       record_block(tid, p, newSizeB, /*slop_szB*/0,
1626                    /*exclude_first_entry*/False, /*maybe_snapshot*/True);
1627       return True;
1628    }
1629    case VG_USERREQ__FREELIKE_BLOCK: {
1630       void* p = (void*)argv[1];
1631       unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/False);
1632       *ret = 0;
1633       return True;
1634    }
1635    case VG_USERREQ__GDB_MONITOR_COMMAND: {
1636      Bool handled = handle_gdb_monitor_command (tid, (HChar*)argv[1]);
1637      if (handled)
1638        *ret = 1;
1639      else
1640        *ret = 0;
1641      return handled;
1642    }
1643 
1644    default:
1645       *ret = 0;
1646       return False;
1647    }
1648 }
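
// For reference (illustrative, not from the original source): client code
// normally issues the requests handled above via the valgrind.h macros, e.g.
//    VALGRIND_MALLOCLIKE_BLOCK(p, szB, /*rzB*/0, /*is_zeroed*/0);
//    VALGRIND_RESIZEINPLACE_BLOCK(p, oldSzB, newSzB, /*rzB*/0);
//    VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);
// The redzone arguments are not used by the handler above.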
1649 
1650 //------------------------------------------------------------//
1651 //--- Instrumentation                                      ---//
1652 //------------------------------------------------------------//
1653 
1654 static void add_counter_update(IRSB* sbOut, Int n)
1655 {
1656    #if defined(VG_BIGENDIAN)
1657    # define END Iend_BE
1658    #elif defined(VG_LITTLEENDIAN)
1659    # define END Iend_LE
1660    #else
1661    # error "Unknown endianness"
1662    #endif
1663    // Add code to increment 'guest_instrs_executed' by 'n', like this:
1664    //   WrTmp(t1, Load64(&guest_instrs_executed))
1665    //   WrTmp(t2, Add64(RdTmp(t1), Const(n)))
1666    //   Store(&guest_instrs_executed, t2)
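   // In C terms this is roughly "guest_instrs_executed += n;" (illustrative
   // paraphrase; the increment is expressed as the flat IR built below).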
1667    IRTemp t1 = newIRTemp(sbOut->tyenv, Ity_I64);
1668    IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
1669    IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );
1670 
1671    IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
1672    IRStmt* st2 =
1673       IRStmt_WrTmp(t2,
1674                    IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
1675                                            IRExpr_Const(IRConst_U64(n))));
1676    IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));
1677 
1678    addStmtToIRSB( sbOut, st1 );
1679    addStmtToIRSB( sbOut, st2 );
1680    addStmtToIRSB( sbOut, st3 );
1681 }
1682 
1683 static IRSB* ms_instrument2( IRSB* sbIn )
1684 {
1685    Int   i, n = 0;
1686    IRSB* sbOut;
1687 
1688    // We increment the instruction count in two places:
1689    // - just before any Ist_Exit statements;
1690    // - just before the IRSB's end.
1691    // In the former case, we zero 'n' and then continue instrumenting.
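   // Illustrative example (not from the original source): for a superblock
   // whose statements are IMark, IMark, Exit, IMark, the loop below emits a
   // "+2" update just before the Exit and a final "+1" update before the
   // SB's end.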
1692 
1693    sbOut = deepCopyIRSBExceptStmts(sbIn);
1694 
1695    for (i = 0; i < sbIn->stmts_used; i++) {
1696       IRStmt* st = sbIn->stmts[i];
1697 
1698       if (!st || st->tag == Ist_NoOp) continue;
1699 
1700       if (st->tag == Ist_IMark) {
1701          n++;
1702       } else if (st->tag == Ist_Exit) {
1703          if (n > 0) {
1704             // Add an increment before the Exit statement, then reset 'n'.
1705             add_counter_update(sbOut, n);
1706             n = 0;
1707          }
1708       }
1709       addStmtToIRSB( sbOut, st );
1710    }
1711 
1712    if (n > 0) {
1713       // Add an increment before the SB end.
1714       add_counter_update(sbOut, n);
1715    }
1716    return sbOut;
1717 }
1718 
1719 static
1720 IRSB* ms_instrument ( VgCallbackClosure* closure,
1721                       IRSB* sbIn,
1722                       const VexGuestLayout* layout,
1723                       const VexGuestExtents* vge,
1724                       const VexArchInfo* archinfo_host,
1725                       IRType gWordTy, IRType hWordTy )
1726 {
1727    if (! have_started_executing_code) {
1728       // Do an initial sample to guarantee that we have at least one.
1729       // We use 'maybe_take_snapshot' instead of 'take_snapshot' to ensure
1730       // 'maybe_take_snapshot's internal static variables are initialised.
1731       have_started_executing_code = True;
1732       maybe_take_snapshot(Normal, "startup");
1733    }
1734 
1735    if      (clo_time_unit == TimeI)  { return ms_instrument2(sbIn); }
1736    else if (clo_time_unit == TimeMS) { return sbIn; }
1737    else if (clo_time_unit == TimeB)  { return sbIn; }
1738    else                              { tl_assert2(0, "bad --time-unit value"); }
1739 }
1740 
1741 
1742 //------------------------------------------------------------//
1743 //--- Writing snapshots                                    ---//
1744 //------------------------------------------------------------//
1745 
1746 static void pp_snapshot(MsFile *fp, Snapshot* snapshot, Int snapshot_n)
1747 {
1748    const Massif_Header header = (Massif_Header) {
1749       .snapshot_n    = snapshot_n,
1750       .time          = snapshot->time,
1751       .sz_B          = snapshot->heap_szB,
1752       .extra_B       = snapshot->heap_extra_szB,
1753       .stacks_B      = snapshot->stacks_szB,
1754       .detailed      = is_detailed_snapshot(snapshot),
1755       .peak          = Peak == snapshot->kind,
1756       .top_node_desc = clo_pages_as_heap ?
1757         "(page allocation syscalls) mmap/mremap/brk, --alloc-fns, etc."
1758         : "(heap allocation functions) malloc/new/new[], --alloc-fns, etc.",
1759       .sig_threshold = clo_threshold
1760    };
1761 
1762    sanity_check_snapshot(snapshot);
1763 
1764    VG_(XT_massif_print)(fp, snapshot->xt, &header, alloc_szB);
1765 }
1766 
1767 static void write_snapshots_to_file(const HChar* massif_out_file,
1768                                     Snapshot snapshots_array[],
1769                                     Int nr_elements)
1770 {
1771    Int i;
1772    MsFile *fp;
1773 
1774    fp = VG_(XT_massif_open)(massif_out_file,
1775                             NULL,
1776                             args_for_massif,
1777                             TimeUnit_to_string(clo_time_unit));
1778    if (fp == NULL)
1779       return; // Error reported by VG_(XT_massif_open)
1780 
1781    for (i = 0; i < nr_elements; i++) {
1782       Snapshot* snapshot = & snapshots_array[i];
1783       pp_snapshot(fp, snapshot, i);     // Detailed snapshot!
1784    }
1785    VG_(XT_massif_close) (fp);
1786 }
1787 
1788 static void write_snapshots_array_to_file(void)
1789 {
1790    // Setup output filename.  Nb: it's important to do this now, ie. as late
1791    // as possible.  If we do it at start-up and the program forks and the
1792    // output file format string contains a %p (pid) specifier, both the
1793    // parent and child will incorrectly write to the same file;  this
1794    // happened in 3.3.0.
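   // Illustrative example (not from the original source): with
   // --massif-out-file=massif.out.%p, a parent with pid 1234 and a forked
   // child with pid 1235 write to massif.out.1234 and massif.out.1235
   // respectively.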
1795    HChar* massif_out_file =
1796       VG_(expand_file_name)("--massif-out-file", clo_massif_out_file);
1797    write_snapshots_to_file (massif_out_file, snapshots, next_snapshot_i);
1798    VG_(free)(massif_out_file);
1799 }
1800 
1801 static void handle_snapshot_monitor_command (const HChar *filename,
1802                                              Bool detailed)
1803 {
1804    Snapshot snapshot;
1805 
1806    if (!clo_pages_as_heap && !have_started_executing_code) {
1807       // See comments of variable have_started_executing_code.
1808       VG_(gdb_printf)
1809          ("error: cannot take snapshot before execution has started\n");
1810       return;
1811    }
1812 
1813    clear_snapshot(&snapshot, /* do_sanity_check */ False);
1814    take_snapshot(&snapshot, Normal, get_time(), detailed);
1815    write_snapshots_to_file ((filename == NULL) ?
1816                             "massif.vgdb.out" : filename,
1817                             &snapshot,
1818                             1);
1819    delete_snapshot(&snapshot);
1820 }
1821 
1822 static void handle_all_snapshots_monitor_command (const HChar *filename)
1823 {
1824    if (!clo_pages_as_heap && !have_started_executing_code) {
1825       // See comments of variable have_started_executing_code.
1826       VG_(gdb_printf)
1827          ("error: cannot take snapshot before execution has started\n");
1828       return;
1829    }
1830 
1831    write_snapshots_to_file ((filename == NULL) ?
1832                             "massif.vgdb.out" : filename,
1833                             snapshots, next_snapshot_i);
1834 }
1835 
1836 static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
1837 {
1838    const HP_Chunk* hc = VG_(HT_Next)(malloc_list);
1839    if (hc) {
1840       xta->nbytes = hc->req_szB;
1841       xta->nblocks = 1;
1842       *ec_alloc = VG_(XT_get_ec_from_xecu)(heap_xt, hc->where);
1843    } else
1844       xta->nblocks = 0;
1845 }
1846 static void ms_xtmemory_report ( const HChar* filename, Bool fini )
1847 {
1848    // Make xtmemory_report_next_block ready to be called.
1849    VG_(HT_ResetIter)(malloc_list);
1850    VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
1851                         VG_(XT_filter_maybe_below_main));
1852    /* As massif already filters one top function,
1853       VG_(XT_filter_maybe_below_main) is used as the filter above. */
1854 }
1855 
1856 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
1857 {
1858    HChar* wcmd;
1859    HChar s[VG_(strlen)(req) + 1]; /* copy for strtok_r */
1860    HChar *ssaveptr;
1861 
1862    VG_(strcpy) (s, req);
1863 
1864    wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
1865    switch (VG_(keyword_id) ("help snapshot detailed_snapshot all_snapshots"
1866                             " xtmemory",
1867                             wcmd, kwd_report_duplicated_matches)) {
1868    case -2: /* multiple matches */
1869       return True;
1870    case -1: /* not found */
1871       return False;
1872    case  0: /* help */
1873       print_monitor_help();
1874       return True;
1875    case  1: { /* snapshot */
1876       HChar* filename;
1877       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1878       handle_snapshot_monitor_command (filename, False /* detailed */);
1879       return True;
1880    }
1881    case  2: { /* detailed_snapshot */
1882       HChar* filename;
1883       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1884       handle_snapshot_monitor_command (filename, True /* detailed */);
1885       return True;
1886    }
1887    case  3: { /* all_snapshots */
1888       HChar* filename;
1889       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1890       handle_all_snapshots_monitor_command (filename);
1891       return True;
1892    }
1893    case  4: { /* xtmemory */
1894       HChar* filename;
1895       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1896       ms_xtmemory_report (filename, False);
1897       return True;
1898    }
1899    default:
1900       tl_assert(0);
1901       return False;
1902    }
1903 }
1904 
1905 static void ms_print_stats (void)
1906 {
1907 #define STATS(format, args...) \
1908       VG_(dmsg)("Massif: " format, ##args)
1909 
1910    STATS("heap allocs:           %u\n", n_heap_allocs);
1911    STATS("heap reallocs:         %u\n", n_heap_reallocs);
1912    STATS("heap frees:            %u\n", n_heap_frees);
1913    STATS("ignored heap allocs:   %u\n", n_ignored_heap_allocs);
1914    STATS("ignored heap frees:    %u\n", n_ignored_heap_frees);
1915    STATS("ignored heap reallocs: %u\n", n_ignored_heap_reallocs);
1916    STATS("stack allocs:          %u\n", n_stack_allocs);
1917    STATS("skipped snapshots:     %u\n", n_skipped_snapshots);
1918    STATS("real snapshots:        %u\n", n_real_snapshots);
1919    STATS("detailed snapshots:    %u\n", n_detailed_snapshots);
1920    STATS("peak snapshots:        %u\n", n_peak_snapshots);
1921    STATS("cullings:              %u\n", n_cullings);
1922 #undef STATS
1923 }
1924 
1925 
1926 //------------------------------------------------------------//
1927 //--- Finalisation                                         ---//
1928 //------------------------------------------------------------//
1929 
1930 static void ms_fini(Int exit_status)
1931 {
1932    ms_xtmemory_report(VG_(clo_xtree_memory_file), True);
1933 
1934    // Output.
1935    write_snapshots_array_to_file();
1936 
1937    if (VG_(clo_stats))
1938       ms_print_stats();
1939 }
1940 
1941 
1942 //------------------------------------------------------------//
1943 //--- Initialisation                                       ---//
1944 //------------------------------------------------------------//
1945 
1946 static void ms_post_clo_init(void)
1947 {
1948    Int i;
1949    HChar* LD_PRELOAD_val;
1950 
1951    /* We will record ExeContexts up to clo_depth + an overestimate and
1952       store them as-is, so the backtrace size must be increased if it is
1953       smaller than what we will store. */
1954    if (VG_(clo_backtrace_size) < clo_depth + MAX_OVERESTIMATE)
1955       VG_(clo_backtrace_size) = clo_depth + MAX_OVERESTIMATE;
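   // Illustrative note (not from the original source): this silently raises
   // a user-specified --num-callers value that is smaller than
   // clo_depth + MAX_OVERESTIMATE.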
1956 
1957    // Check options.
1958    if (clo_pages_as_heap) {
1959       if (clo_stacks) {
1960          VG_(fmsg_bad_option)("--pages-as-heap=yes",
1961             "Cannot be used together with --stacks=yes");
1962       }
1963    }
1964    if (!clo_heap) {
1965       clo_pages_as_heap = False;
1966    }
1967 
1968    // If --pages-as-heap=yes we don't want malloc replacement to occur.  So we
1969    // disable vgpreload_massif-$PLATFORM.so by removing it from LD_PRELOAD (or
1970    // platform-equivalent). This is a bit of a hack, but LD_PRELOAD is set up
1971    // well before tool initialisation, so this seems the best way to do it.
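   // Illustrative example of the rewrite done below (not from the original
   // source; actual paths depend on the platform and install prefix):
   //   before: ".../vgpreload_core-amd64-linux.so:.../vgpreload_massif-amd64-linux.so"
   //   after:  ".../vgpreload_core-amd64-linux.so"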
1972    if (clo_pages_as_heap) {
1973       HChar* s1;
1974       HChar* s2;
1975 
1976       clo_heap_admin = 0;     // No heap admin on pages.
1977 
1978       LD_PRELOAD_val = VG_(getenv)( VG_(LD_PRELOAD_var_name) );
1979       tl_assert(LD_PRELOAD_val);
1980 
1981       VERB(2, "clo_pages_as_heap orig LD_PRELOAD '%s'\n", LD_PRELOAD_val);
1982 
1983       // Make sure the vgpreload_core-$PLATFORM entry is there, for sanity.
1984       s1 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_core");
1985       tl_assert(s1);
1986 
1987       // Now find the vgpreload_massif-$PLATFORM entry.
1988       s1 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_massif");
1989       tl_assert(s1);
1990       s2 = s1;
1991 
1992       // Position s1 on the previous ':', which must be there because
1993       // of the preceding vgpreload_core-$PLATFORM entry.
1994       for (; *s1 != ':'; s1--)
1995          ;
1996 
1997       // Position s2 on the next ':' or \0
1998       for (; *s2 != ':' && *s2 != '\0'; s2++)
1999          ;
2000 
2001       // Move all characters from s2 to s1
2002       while ((*s1++ = *s2++))
2003          ;
2004 
2005       VERB(2, "clo_pages_as_heap cleaned LD_PRELOAD '%s'\n", LD_PRELOAD_val);
2006    }
2007 
2008    // Print alloc-fns and ignore-fns, if necessary.
2009    if (VG_(clo_verbosity) > 1) {
2010       VERB(1, "alloc-fns:\n");
2011       for (i = 0; i < VG_(sizeXA)(alloc_fns); i++) {
2012          HChar** fn_ptr = VG_(indexXA)(alloc_fns, i);
2013          VERB(1, "  %s\n", *fn_ptr);
2014       }
2015 
2016       VERB(1, "ignore-fns:\n");
2017       if (0 == VG_(sizeXA)(ignore_fns)) {
2018          VERB(1, "  <empty>\n");
2019       }
2020       for (i = 0; i < VG_(sizeXA)(ignore_fns); i++) {
2021          HChar** fn_ptr = VG_(indexXA)(ignore_fns, i);
2022          VERB(1, "  %d: %s\n", i, *fn_ptr);
2023       }
2024    }
2025 
2026    // Events to track.
2027    if (clo_stacks) {
2028       VG_(track_new_mem_stack)        ( new_mem_stack        );
2029       VG_(track_die_mem_stack)        ( die_mem_stack        );
2030       VG_(track_new_mem_stack_signal) ( new_mem_stack_signal );
2031       VG_(track_die_mem_stack_signal) ( die_mem_stack_signal );
2032    }
2033 
2034    if (clo_pages_as_heap) {
2035       VG_(track_new_mem_startup) ( ms_new_mem_startup );
2036       VG_(track_new_mem_brk)     ( ms_new_mem_brk     );
2037       VG_(track_new_mem_mmap)    ( ms_new_mem_mmap    );
2038 
2039       VG_(track_copy_mem_remap)  ( ms_copy_mem_remap  );
2040 
2041       VG_(track_die_mem_brk)     ( ms_die_mem_brk     );
2042       VG_(track_die_mem_munmap)  ( ms_die_mem_munmap  );
2043    }
2044 
2045    // Initialise snapshot array, and sanity-check it.
2046    snapshots = VG_(malloc)("ms.main.mpoci.1",
2047                            sizeof(Snapshot) * clo_max_snapshots);
2048    // We don't want to do snapshot sanity checks here, because the
2049    // snapshots are still uninitialised at this point.
2050    for (i = 0; i < clo_max_snapshots; i++) {
2051       clear_snapshot( & snapshots[i], /*do_sanity_check*/False );
2052    }
2053    sanity_check_snapshots_array();
2054 
2055    if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
2056       // Activate full xtree memory profiling.
2057       // As massif already filters one top function, use as filter
2058       // VG_(XT_filter_maybe_below_main).
2059       VG_(XTMemory_Full_init)(VG_(XT_filter_maybe_below_main));
2060 
2061 }
2062 
2063 static void ms_pre_clo_init(void)
2064 {
2065    VG_(details_name)            ("Massif");
2066    VG_(details_version)         (NULL);
2067    VG_(details_description)     ("a heap profiler");
2068    VG_(details_copyright_author)(
2069       "Copyright (C) 2003-2017, and GNU GPL'd, by Nicholas Nethercote");
2070    VG_(details_bug_reports_to)  (VG_BUGS_TO);
2071 
2072    VG_(details_avg_translation_sizeB) ( 330 );
2073 
2074    VG_(clo_vex_control).iropt_register_updates_default
2075       = VG_(clo_px_file_backed)
2076       = VexRegUpdSpAtMemAccess; // overridable by the user.
2077 
2078    // Basic functions.
2079    VG_(basic_tool_funcs)          (ms_post_clo_init,
2080                                    ms_instrument,
2081                                    ms_fini);
2082 
2083    // Needs.
2084    VG_(needs_libc_freeres)();
2085    VG_(needs_cxx_freeres)();
2086    VG_(needs_command_line_options)(ms_process_cmd_line_option,
2087                                    ms_print_usage,
2088                                    ms_print_debug_usage);
2089    VG_(needs_client_requests)     (ms_handle_client_request);
2090    VG_(needs_sanity_checks)       (ms_cheap_sanity_check,
2091                                    ms_expensive_sanity_check);
2092    VG_(needs_print_stats)         (ms_print_stats);
2093    VG_(needs_malloc_replacement)  (ms_malloc,
2094                                    ms___builtin_new,
2095                                    ms___builtin_vec_new,
2096                                    ms_memalign,
2097                                    ms_calloc,
2098                                    ms_free,
2099                                    ms___builtin_delete,
2100                                    ms___builtin_vec_delete,
2101                                    ms_realloc,
2102                                    ms_malloc_usable_size,
2103                                    0 );
2104 
2105    // HP_Chunks.
2106    HP_chunk_poolalloc = VG_(newPA)
2107       (sizeof(HP_Chunk),
2108        1000,
2109        VG_(malloc),
2110        "massif HP_Chunk pool",
2111        VG_(free));
2112    malloc_list = VG_(HT_construct)( "Massif's malloc list" );
2113 
2114    // Heap XTree
2115    heap_xt = VG_(XT_create)(VG_(malloc),
2116                             "ms.xtrees",
2117                             VG_(free),
2118                             sizeof(SizeT),
2119                             init_szB, add_szB, sub_szB,
2120                             filter_IPs);
2121 
2122    // Initialise alloc_fns and ignore_fns.
2123    init_alloc_fns();
2124    init_ignore_fns();
2125 
2126    // Initialise args_for_massif.
2127    args_for_massif = VG_(newXA)(VG_(malloc), "ms.main.mprci.1",
2128                                 VG_(free), sizeof(HChar*));
2129 }
2130 
2131 VG_DETERMINE_INTERFACE_VERSION(ms_pre_clo_init)
2132 
2133 //--------------------------------------------------------------------//
2134 //--- end                                                          ---//
2135 //--------------------------------------------------------------------//
2136