//--------------------------------------------------------------------//
//--- Massif: a heap profiling tool.                     ms_main.c ---//
//--------------------------------------------------------------------//

/*
   This file is part of Massif, a Valgrind tool for profiling memory
   usage of programs.

   Copyright (C) 2003-2010 Nicholas Nethercote
      njn@valgrind.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

//---------------------------------------------------------------------------
// XXX:
//---------------------------------------------------------------------------
// Todo -- nice, but less critical:
// - do a graph-drawing test
// - make file format more generic.  Obstacles:
//   - unit prefixes are not generic
//   - preset column widths for stats are not generic
//   - preset column headers are not generic
//   - "Massif arguments:" line is not generic
// - do snapshots on client requests
//   - (Michael Meeks): have an interactive way to request a dump
//     (callgrind_control-style)
//     - "profile now"
//     - "show me the extra allocations since the last snapshot"
//     - "start/stop logging" (eg. quickly skip boring bits)
// - Add ability to draw multiple graphs, eg. heap-only, stack-only, total.
//   Give each graph a title.  (try to do it generically!)
// - allow truncation of long fnnames if the exact line number is
//   identified?  [hmm, could make getting the name of alloc-fns more
//   difficult] [could dump full names to file, truncate in ms_print]
// - make --show-below-main=no work
// - Options like --alloc-fn='operator new(unsigned, std::nothrow_t const&)'
//   don't work in a .valgrindrc file or in $VALGRIND_OPTS.
//   m_commandline.c:add_args_from_string() needs to respect single quotes.
// - With --stacks=yes, want to add a stack trace for detailed snapshots so
//   it's clear where/why the peak is occurring. (Mattieu Castet)  Also,
//   possibly useful even with --stacks=no? (Andi Yin)
//
// Performance:
// - To run the benchmarks:
//
//     perl perf/vg_perf --tools=massif --reps=3 perf/{heap,tinycc} massif
//     time valgrind --tool=massif --depth=100 konqueror
//
//   The other benchmarks don't do much allocation, and so give similar
//   speeds to Nulgrind.
//
// - Timing results on 'nevermore' (njn's machine) as of r7013:
//
//     heap      0.53s  ma:12.4s (23.5x, -----)
//     tinycc    0.46s  ma: 4.9s (10.7x, -----)
//     many-xpts 0.08s  ma: 2.0s (25.0x, -----)
//     konqueror 29.6s real  0:21.0s user
//
//   [Introduction of --time-unit=i as the default slowed things down by
//   roughly 0--20%.]
//
// - get_XCon accounts for about 9% of konqueror startup time.  Try
//   keeping XPt children sorted by 'ip' and use binary search in get_XCon.
//   Requires factoring out binary search code from various places into a
//   VG_(bsearch) function.
//
// Todo -- low priority:
// - In each XPt, record both bytes and the number of allocations, and
//   possibly the global number of allocations.
// - (Andy Lin) Give a stack trace on detailed snapshots?
// - (Artur Wisz) add a feature to Massif to ignore any heap blocks larger
//   than a certain size!  Because: "linux's malloc allows to set a
//   MMAP_THRESHOLD value, so we set it to 4096 - all blocks above that will
//   be handled directly by the kernel, and are guaranteed to be returned to
//   the system when freed.  So we needed to profile only blocks below this
//   limit."
//
// File format working notes:
#if 0
desc: --heap-admin=foo
cmd: date
time_unit: ms
#-----------
snapshot=0
#-----------
time=0
mem_heap_B=0
mem_heap_admin_B=0
mem_stacks_B=0
heap_tree=empty
#-----------
snapshot=1
#-----------
time=353
mem_heap_B=5
mem_heap_admin_B=0
mem_stacks_B=0
heap_tree=detailed
n1: 5 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
 n1: 5 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
  n1: 5 0x279DE6: _nl_load_locale_from_archive (in /lib/libc-2.3.5.so)
   n1: 5 0x278E97: _nl_find_locale (in /lib/libc-2.3.5.so)
    n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)
     n1: 5 0x8049821: (within /bin/date)
      n0: 5 0x26ED5E: (below main) (in /lib/libc-2.3.5.so)


n_events: n time(ms) total(B) useful-heap(B) admin-heap(B) stacks(B)
t_events: B
n 0 0 0 0 0
n 0 0 0 0 0
t1: 5 <string...>
t1: 6 <string...>

Ideas:
- each snapshot specifies an x-axis value and one or more y-axis values.
- can display the y-axis values separately if you like
- can completely separate connection between snapshots and trees.

Challenges:
- how to specify and scale/abbreviate units on axes?
- how to combine multiple values into the y-axis?

--------------------------------------------------------------------------------
Command:            date
Massif arguments:   --heap-admin=foo
ms_print arguments: massif.out
--------------------------------------------------------------------------------

    KB
6.472^                                                       :#
     |                                                       :#  ::  .    .
     ...
     |                                ::@  :@  :@  :@:::#  ::  :    ::::
   0 +-----------------------------------@---@---@-----@--@---#-------------->ms
     0                                                                    713

Number of snapshots: 50
 Detailed snapshots: [2, 11, 13, 19, 25, 32 (peak)]

--------------------------------------------------------------------------------
  n       time(ms)         total(B)   useful-heap(B) admin-heap(B)  stacks(B)
--------------------------------------------------------------------------------
  0              0                0                0             0          0
  1            345                5                5             0          0
  2            353                5                5             0          0
100.00% (5B) (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
->100.00% (5B) 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
#endif

//---------------------------------------------------------------------------

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcfile.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_xarray.h"
#include "pub_tool_clientstate.h"

#include "valgrind.h"           // For {MALLOC,FREE}LIKE_BLOCK

//------------------------------------------------------------//
//--- Overview of operation                                ---//
//------------------------------------------------------------//

// The size of the stacks and heap is tracked.  The heap is tracked in a lot
// of detail, enough to tell how many bytes each line of code is responsible
// for, more or less.  The main data structure is a tree representing the
// call tree beneath all the allocation functions like malloc().
// (Alternatively, if --pages-as-heap=yes is specified, memory is tracked at
// the page level, and each page is treated much like a heap block.  We use
// "heap" throughout below to cover this case because the concepts are all
// the same.)
//
// "Snapshots" are recordings of the memory usage.  There are two basic
// kinds:
// - Normal:  these record the current time, total memory size, total heap
//   size, heap admin size and stack size.
// - Detailed: these record those things in a normal snapshot, plus a very
//   detailed XTree (see below) indicating how the heap is structured.
//
// Snapshots are taken every so often.  There are two storage classes of
// snapshots:
// - Temporary:  Massif does a temporary snapshot every so often.  The idea
//   is to always have a certain number of temporary snapshots around.  So
//   we take them frequently to begin with, but decreasingly often as the
//   program continues to run.  Also, we remove some old ones after a while.
//   Overall it's a kind of exponential decay thing.  Most of these are
//   normal snapshots, a small fraction are detailed snapshots.
// - Permanent:  Massif takes a permanent (detailed) snapshot in some
//   circumstances.  They are:
//   - Peak snapshot:  When the memory usage peak is reached, it takes a
//     snapshot.  It keeps this, unless the peak is subsequently exceeded,
//     in which case it will overwrite the peak snapshot.
//   - User-requested snapshots:  These are done in response to client
//     requests.  They are always kept.
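//
// For example (illustrative numbers, not from the original source): with
// the defaults --max-snapshots=100 and --detailed-freq=10, a long-running
// program ends up with between about 50 and 100 temporary snapshots,
// roughly every tenth one detailed, plus the peak snapshot if one was
// taken.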

// Used for printing things when clo_verbosity > 1.
#define VERB(verb, format, args...) \
   if (VG_(clo_verbosity) > verb) { \
      VG_(dmsg)("Massif: " format, ##args); \
   }

// Used for printing stats when clo_stats == True.
#define STATS(format, args...) \
   if (VG_(clo_stats)) { \
      VG_(dmsg)("Massif: " format, ##args); \
   }
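
// Usage sketch (illustrative, not from the original source):
//
//    VERB(2, "Culling...\n");       // printed only when clo_verbosity > 2
//    STATS("XPts: %u\n", n_xpts);   // printed only when --stats=yes is given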

//------------------------------------------------------------//
//--- Statistics                                           ---//
//------------------------------------------------------------//

// Konqueror startup, to give an idea of the numbers involved with a biggish
// program, with default depth:
//
//  depth=3                 depth=40
//  - 310,000 allocations
//  - 300,000 frees
//  -  15,000 XPts          800,000 XPts
//  -   1,800 top-XPts

static UInt n_heap_allocs           = 0;
static UInt n_heap_reallocs         = 0;
static UInt n_heap_frees            = 0;
static UInt n_ignored_heap_allocs   = 0;
static UInt n_ignored_heap_frees    = 0;
static UInt n_ignored_heap_reallocs = 0;
static UInt n_stack_allocs          = 0;
static UInt n_stack_frees           = 0;
static UInt n_xpts                  = 0;
static UInt n_xpt_init_expansions   = 0;
static UInt n_xpt_later_expansions  = 0;
static UInt n_sxpt_allocs           = 0;
static UInt n_sxpt_frees            = 0;
static UInt n_skipped_snapshots     = 0;
static UInt n_real_snapshots        = 0;
static UInt n_detailed_snapshots    = 0;
static UInt n_peak_snapshots        = 0;
static UInt n_cullings              = 0;
static UInt n_XCon_redos            = 0;

//------------------------------------------------------------//
//--- Globals                                              ---//
//------------------------------------------------------------//

// Number of guest instructions executed so far.  Only used with
// --time-unit=i.
static Long guest_instrs_executed = 0;

static SizeT heap_szB       = 0; // Live heap size
static SizeT heap_extra_szB = 0; // Live heap extra size -- slop + admin bytes
static SizeT stacks_szB     = 0; // Live stacks size

// This is the total size from the current peak snapshot, or 0 if no peak
// snapshot has been taken yet.
static SizeT peak_snapshot_total_szB = 0;

// Incremented every time memory is allocated/deallocated, by the
// allocated/deallocated amount; includes heap, heap-admin and stack
// memory.  An alternative to milliseconds as a unit of program "time".
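// For example (illustrative): under --time-unit=B, a malloc(100) followed
// by the matching free() advances this "clock" by roughly 200 bytes, since
// the allocation and the deallocation are both counted, along with the
// accompanying admin and slop bytes.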
static ULong total_allocs_deallocs_szB = 0;

// When running with --heap=yes --pages-as-heap=no, we don't start taking
// snapshots until the first basic block is executed, rather than doing it in
// ms_post_clo_init (which is the obvious spot), for two reasons.
// - It lets us ignore stack events prior to that, because they're not
//   really proper ones and just would screw things up.
// - Because there's still some core initialisation to do, and so there
//   would be an artificial time gap between the first and second snapshots.
//
// When running with --heap=yes --pages-as-heap=yes, snapshots start much
// earlier due to new_mem_startup so this isn't relevant.
//
static Bool have_started_executing_code = False;

//------------------------------------------------------------//
//--- Alloc fns                                            ---//
//------------------------------------------------------------//

static XArray* alloc_fns;
static XArray* ignore_fns;

static void init_alloc_fns(void)
{
   // Create the list, and add the default elements.
   alloc_fns = VG_(newXA)(VG_(malloc), "ms.main.iaf.1",
                          VG_(free), sizeof(Char*));
   #define DO(x)  { Char* s = x; VG_(addToXA)(alloc_fns, &s); }

   // Ordered roughly according to (presumed) frequency.
   // Nb: The C++ "operator new*" ones are overloadable.  We include them
   // always anyway, because even if they're overloaded, it would be a
   // prodigiously stupid overloading that caused them to not allocate
   // memory.
   //
   // XXX: because we don't look at the first stack entry (unless it's a
   // custom allocation) there's not much point to having all these alloc
   // functions here -- they should never appear anywhere (I think?) other
   // than the top stack entry.  The only exceptions are those that in
   // vg_replace_malloc.c are partly or fully implemented in terms of another
   // alloc function: realloc (which uses malloc);  valloc,
   // malloc_zone_valloc, posix_memalign and memalign_common (which use
   // memalign).
   //
   DO("malloc"                                              );
   DO("__builtin_new"                                       );
   DO("operator new(unsigned)"                              );
   DO("operator new(unsigned long)"                         );
   DO("__builtin_vec_new"                                   );
   DO("operator new[](unsigned)"                            );
   DO("operator new[](unsigned long)"                       );
   DO("calloc"                                              );
   DO("realloc"                                             );
   DO("memalign"                                            );
   DO("posix_memalign"                                      );
   DO("valloc"                                              );
   DO("operator new(unsigned, std::nothrow_t const&)"       );
   DO("operator new[](unsigned, std::nothrow_t const&)"     );
   DO("operator new(unsigned long, std::nothrow_t const&)"  );
   DO("operator new[](unsigned long, std::nothrow_t const&)");
#if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   DO("malloc_common"                                       );
   DO("calloc_common"                                       );
   DO("realloc_common"                                      );
   DO("memalign_common"                                     );
#elif defined(VGO_darwin)
   DO("malloc_zone_malloc"                                  );
   DO("malloc_zone_calloc"                                  );
   DO("malloc_zone_realloc"                                 );
   DO("malloc_zone_memalign"                                );
   DO("malloc_zone_valloc"                                  );
#endif
}

static void init_ignore_fns(void)
{
   // Create the (empty) list.
   ignore_fns = VG_(newXA)(VG_(malloc), "ms.main.iif.1",
                           VG_(free), sizeof(Char*));
}

// Determines if the named function is a member of the XArray.
static Bool is_member_fn(XArray* fns, Char* fnname)
{
   Char** fn_ptr;
   Int    i;

   // Nb: It's a linear search through the list, because we're comparing
   // strings rather than pointers to strings.
   // Nb: This gets called a lot.  It was an OSet, but they're quite slow to
   // iterate through so it wasn't a good choice.
   for (i = 0; i < VG_(sizeXA)(fns); i++) {
      fn_ptr = VG_(indexXA)(fns, i);
      if (VG_STREQ(fnname, *fn_ptr))
         return True;
   }
   return False;
}


//------------------------------------------------------------//
//--- Command line args                                    ---//
//------------------------------------------------------------//

#define MAX_DEPTH    200

typedef enum { TimeI, TimeMS, TimeB } TimeUnit;

static Char* TimeUnit_to_string(TimeUnit time_unit)
{
   switch (time_unit) {
   case TimeI:  return "i";
   case TimeMS: return "ms";
   case TimeB:  return "B";
   default:     tl_assert2(0, "TimeUnit_to_string: unrecognised TimeUnit");
   }
}

static Bool   clo_heap            = True;
// clo_heap_admin is deliberately a word-sized type.  At one point it was
// a UInt, but this caused problems on 64-bit machines when it was
// multiplied by a small negative number and then promoted to a
// word-sized type -- it ended up with a value of 4.2 billion.  Sigh.
static SSizeT clo_heap_admin      = 8;
static Bool   clo_pages_as_heap   = False;
static Bool   clo_stacks          = False;
static Int    clo_depth           = 30;
static double clo_threshold       = 1.0;  // percentage
static double clo_peak_inaccuracy = 1.0;  // percentage
static Int    clo_time_unit       = TimeI;
static Int    clo_detailed_freq   = 10;
static Int    clo_max_snapshots   = 100;
static Char*  clo_massif_out_file = "massif.out.%p";

static XArray* args_for_massif;

static Bool ms_process_cmd_line_option(Char* arg)
{
   Char* tmp_str;

   // Remember the arg for later use.
   VG_(addToXA)(args_for_massif, &arg);

        if VG_BOOL_CLO(arg, "--heap",            clo_heap)   {}
   else if VG_BINT_CLO(arg, "--heap-admin",      clo_heap_admin, 0, 1024) {}

   else if VG_BOOL_CLO(arg, "--stacks",          clo_stacks) {}

   else if VG_BOOL_CLO(arg, "--pages-as-heap",   clo_pages_as_heap) {}

   else if VG_BINT_CLO(arg, "--depth",           clo_depth, 1, MAX_DEPTH) {}

   else if VG_STR_CLO(arg, "--alloc-fn",         tmp_str) {
      VG_(addToXA)(alloc_fns, &tmp_str);
   }
   else if VG_STR_CLO(arg, "--ignore-fn",        tmp_str) {
      VG_(addToXA)(ignore_fns, &tmp_str);
   }

   else if VG_DBL_CLO(arg, "--threshold",        clo_threshold) {
      if (clo_threshold < 0 || clo_threshold > 100) {
         VG_(fmsg_bad_option)(arg,
            "--threshold must be between 0.0 and 100.0\n");
      }
   }

   else if VG_DBL_CLO(arg, "--peak-inaccuracy",  clo_peak_inaccuracy) {}

   else if VG_XACT_CLO(arg, "--time-unit=i",     clo_time_unit, TimeI)  {}
   else if VG_XACT_CLO(arg, "--time-unit=ms",    clo_time_unit, TimeMS) {}
   else if VG_XACT_CLO(arg, "--time-unit=B",     clo_time_unit, TimeB)  {}

   else if VG_BINT_CLO(arg, "--detailed-freq",   clo_detailed_freq, 1, 1000000) {}

   else if VG_BINT_CLO(arg, "--max-snapshots",   clo_max_snapshots, 10, 1000) {}

   else if VG_STR_CLO(arg, "--massif-out-file",  clo_massif_out_file) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void ms_print_usage(void)
{
   VG_(printf)(
"    --heap=no|yes             profile heap blocks [yes]\n"
"    --heap-admin=<size>       average admin bytes per heap block;\n"
"                              ignored if --heap=no [8]\n"
"    --stacks=no|yes           profile stack(s) [no]\n"
"    --pages-as-heap=no|yes    profile memory at the page level [no]\n"
"    --depth=<number>          depth of contexts [30]\n"
"    --alloc-fn=<name>         specify <name> as an alloc function [empty]\n"
"    --ignore-fn=<name>        ignore heap allocations within <name> [empty]\n"
"    --threshold=<m.n>         significance threshold, as a percentage [1.0]\n"
"    --peak-inaccuracy=<m.n>   maximum peak inaccuracy, as a percentage [1.0]\n"
"    --time-unit=i|ms|B        time unit: instructions executed, milliseconds\n"
"                              or heap bytes alloc'd/dealloc'd [i]\n"
"    --detailed-freq=<N>       every Nth snapshot should be detailed [10]\n"
"    --max-snapshots=<N>       maximum number of snapshots recorded [100]\n"
"    --massif-out-file=<file>  output file name [massif.out.%%p]\n"
   );
}

static void ms_print_debug_usage(void)
{
   VG_(printf)(
"    (none)\n"
   );
}
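
// Example invocation (illustrative, not from the original source; all of
// these options are handled above):
//
//    valgrind --tool=massif --time-unit=B --detailed-freq=5 \
//             --alloc-fn='operator new(unsigned long)' ./myprog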


//------------------------------------------------------------//
//--- XPts, XTrees and XCons                               ---//
//------------------------------------------------------------//

// An XPt represents an "execution point", ie. a code address.  Each XPt is
// part of a tree of XPts (an "execution tree", or "XTree").  The details of
// the heap are represented by a single XTree.
//
// The root of the tree is 'alloc_xpt', which represents all allocation
// functions, eg:
// - malloc/calloc/realloc/memalign/new/new[];
// - user-specified allocation functions (using --alloc-fn);
// - custom allocation (MALLOCLIKE) points
// It's a bit of a fake XPt (ie. its 'ip' is zero), and is only used because
// it makes the code simpler.
//
// Any child of 'alloc_xpt' is called a "top-XPt".  The XPts at the bottom
// of an XTree (leaf nodes) are "bottom-XPTs".
//
// Each path from a top-XPt to a bottom-XPt through an XTree gives an
// execution context ("XCon"), ie. a stack trace.  (And sub-paths represent
// stack sub-traces.)  The number of XCons in an XTree is equal to the
// number of bottom-XPTs in that XTree.
//
//      alloc_xpt       XTrees are bi-directional.
//        | ^
//        v |
//     > parent <       Example: if child1() calls parent() and child2()
//    /    |     \      also calls parent(), and parent() calls malloc(),
//   |    / \     |     the XTree will look like this.
//   |   v   v    |
//   child1   child2
//
// (Note that malformed stack traces can lead to difficulties.  See the
// comment at the bottom of get_XCon.)
//
// XTrees and XPts are mirrored by SXTrees and SXPts, where the 'S' is short
// for "saved".  When the XTree is duplicated for a snapshot, we duplicate
// it as an SXTree, which is similar but omits some things it does not need,
// and aggregates up insignificant nodes.  This is important as an SXTree is
// typically much smaller than an XTree.

// XXX: make XPt and SXPt extensible arrays, to avoid having to do two
// allocations per Pt.

typedef struct _XPt XPt;
struct _XPt {
   Addr  ip;               // code address

   // Bottom-XPts: space for the precise context.
   // Other XPts:  space of all the descendent bottom-XPts.
   // Nb: this value goes up and down as the program executes.
   SizeT szB;

   XPt*  parent;           // pointer to parent XPt

   // Children.
   // n_children and max_children are 32-bit integers.  16-bit integers
   // are too small -- a very big program might have more than 65536
   // allocation points (ie. top-XPts) -- Konqueror starting up has 1800.
   UInt  n_children;       // number of children
   UInt  max_children;     // capacity of children array
   XPt** children;         // pointers to children XPts
};

typedef
   enum {
      SigSXPt,
      InsigSXPt
   }
   SXPtTag;

typedef struct _SXPt SXPt;
struct _SXPt {
   SXPtTag tag;
   SizeT szB;              // memory size for the node, be it Sig or Insig
   union {
      // An SXPt representing a single significant code location.  Much like
      // an XPt, minus the fields that aren't necessary.
      struct {
         Addr   ip;
         UInt   n_children;
         SXPt** children;
      }
      Sig;

      // An SXPt representing one or more code locations, all below the
      // significance threshold.
      struct {
         Int   n_xpts;     // number of aggregated XPts
      }
      Insig;
   };
};
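
// Illustrative example (not from the original source): with a 1% threshold,
// duplicating an XTree node holding 1000 bytes whose children hold 970B,
// 20B, 6B and 4B (see dup_XTree below) gives:
//
//    SigSXPt (szB=1000)
//      +-- SigSXPt   (szB=970)
//      +-- SigSXPt   (szB=20)
//      +-- InsigSXPt (szB=10, n_xpts=2)  // the 6B and 4B XPts, aggregated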

// Fake XPt representing all allocation functions like malloc().  Acts as
// parent node to all top-XPts.
static XPt* alloc_xpt;

// Cheap allocation for blocks that never need to be freed.  Saves about 10%
// for Konqueror startup with --depth=40.
static void* perm_malloc(SizeT n_bytes)
{
   static Addr hp     = 0;    // current heap pointer
   static Addr hp_lim = 0;    // maximum usable byte in current block

   #define SUPERBLOCK_SIZE  (1 << 20)  // 1 MB

   if (hp + n_bytes > hp_lim) {
      hp = (Addr)VG_(am_shadow_alloc)(SUPERBLOCK_SIZE);
      if (0 == hp)
         VG_(out_of_memory_NORETURN)( "massif:perm_malloc",
                                      SUPERBLOCK_SIZE );
      hp_lim = hp + SUPERBLOCK_SIZE - 1;
   }

   hp += n_bytes;

   return (void*)(hp - n_bytes);
}

static XPt* new_XPt(Addr ip, XPt* parent)
{
   // XPts are never freed, so we can use perm_malloc to allocate them.
   // Note that we cannot use perm_malloc for the 'children' array, because
   // that needs to be resizable.
   XPt* xpt    = perm_malloc(sizeof(XPt));
   xpt->ip     = ip;
   xpt->szB    = 0;
   xpt->parent = parent;

   // We don't initially allocate any space for children.  We let that
   // happen on demand.  Many XPts (ie. all the bottom-XPts) don't have any
   // children anyway.
   xpt->n_children   = 0;
   xpt->max_children = 0;
   xpt->children     = NULL;

   // Update statistics
   n_xpts++;

   return xpt;
}

static void add_child_xpt(XPt* parent, XPt* child)
{
   // Expand 'children' if necessary.
   tl_assert(parent->n_children <= parent->max_children);
   if (parent->n_children == parent->max_children) {
      if (0 == parent->max_children) {
         parent->max_children = 4;
         parent->children = VG_(malloc)( "ms.main.acx.1",
                                         parent->max_children * sizeof(XPt*) );
         n_xpt_init_expansions++;
      } else {
         parent->max_children *= 2;    // Double size
         parent->children = VG_(realloc)( "ms.main.acx.2",
                                          parent->children,
                                          parent->max_children * sizeof(XPt*) );
         n_xpt_later_expansions++;
      }
   }

   // Insert new child XPt in parent's children list.
   parent->children[ parent->n_children++ ] = child;
}

// Reverse comparison for a reverse sort -- biggest to smallest.
static Int SXPt_revcmp_szB(void* n1, void* n2)
{
   SXPt* sxpt1 = *(SXPt**)n1;
   SXPt* sxpt2 = *(SXPt**)n2;
   return ( sxpt1->szB < sxpt2->szB ?  1
          : sxpt1->szB > sxpt2->szB ? -1
          :                            0);
}

//------------------------------------------------------------//
//--- XTree Operations                                     ---//
//------------------------------------------------------------//

// Duplicates an XTree as an SXTree.
static SXPt* dup_XTree(XPt* xpt, SizeT total_szB)
{
   Int   i, n_sig_children, n_insig_children, n_child_sxpts;
   SizeT sig_child_threshold_szB;
   SXPt* sxpt;

   // Number of XPt children  Action for SXPT
   // -----------------       ---------------
   // 0 sig, 0 insig          alloc 0 children
   // N sig, 0 insig          alloc N children, dup all
   // N sig, M insig          alloc N+1, dup first N, aggregate remaining M
   // 0 sig, M insig          alloc 1, aggregate M

   // Work out how big a child must be to be significant.  If the current
   // total_szB is zero, then we set it to 1, which means everything will be
   // judged insignificant -- this is sensible, as there's no point showing
   // any detail for this case.  Unless they used --threshold=0, in which
   // case we show them everything because that's what they asked for.
   //
   // Nb: We do this once now, rather than once per child, because if we do
   // that the cost of all the divisions adds up to something significant.
   if (0 == total_szB && 0 != clo_threshold) {
      sig_child_threshold_szB = 1;
   } else {
      sig_child_threshold_szB = (SizeT)((total_szB * clo_threshold) / 100);
   }

   // How many children are significant?  And do we need an aggregate SXPt?
   n_sig_children = 0;
   for (i = 0; i < xpt->n_children; i++) {
      if (xpt->children[i]->szB >= sig_child_threshold_szB) {
         n_sig_children++;
      }
   }
   n_insig_children = xpt->n_children - n_sig_children;
   n_child_sxpts    = n_sig_children + ( n_insig_children > 0 ? 1 : 0 );

   // Duplicate the XPt.
   sxpt                 = VG_(malloc)("ms.main.dX.1", sizeof(SXPt));
   n_sxpt_allocs++;
   sxpt->tag            = SigSXPt;
   sxpt->szB            = xpt->szB;
   sxpt->Sig.ip         = xpt->ip;
   sxpt->Sig.n_children = n_child_sxpts;

   // Create the SXPt's children.
   if (n_child_sxpts > 0) {
      Int j;
      SizeT sig_children_szB = 0, insig_children_szB = 0;
      sxpt->Sig.children = VG_(malloc)("ms.main.dX.2",
                                       n_child_sxpts * sizeof(SXPt*));

      // Duplicate the significant children.  (Nb: sig_children_szB +
      // insig_children_szB doesn't necessarily equal xpt->szB.)
      j = 0;
      for (i = 0; i < xpt->n_children; i++) {
         if (xpt->children[i]->szB >= sig_child_threshold_szB) {
            sxpt->Sig.children[j++] = dup_XTree(xpt->children[i], total_szB);
            sig_children_szB += xpt->children[i]->szB;
         } else {
            insig_children_szB += xpt->children[i]->szB;
         }
      }

      // Create the SXPt for the insignificant children, if any, and put it
      // in the last child entry.
      if (n_insig_children > 0) {
         // Nb: We bump 'n_sxpt_allocs' here because creating an Insig SXPt
         // doesn't involve a call to dup_XTree().
         SXPt* insig_sxpt = VG_(malloc)("ms.main.dX.3", sizeof(SXPt));
         n_sxpt_allocs++;
         insig_sxpt->tag = InsigSXPt;
         insig_sxpt->szB = insig_children_szB;
         insig_sxpt->Insig.n_xpts = n_insig_children;
         sxpt->Sig.children[n_sig_children] = insig_sxpt;
      }
   } else {
      sxpt->Sig.children = NULL;
   }

   return sxpt;
}

static void free_SXTree(SXPt* sxpt)
{
   Int i;
   tl_assert(sxpt != NULL);

   switch (sxpt->tag) {
    case SigSXPt:
      // Free all children SXPts, then the children array.
      for (i = 0; i < sxpt->Sig.n_children; i++) {
         free_SXTree(sxpt->Sig.children[i]);
         sxpt->Sig.children[i] = NULL;
      }
      VG_(free)(sxpt->Sig.children);  sxpt->Sig.children = NULL;
      break;

    case InsigSXPt:
      break;

    default: tl_assert2(0, "free_SXTree: unknown SXPt tag");
   }

   // Free the SXPt itself.
   VG_(free)(sxpt);  sxpt = NULL;
   n_sxpt_frees++;
}

// Sanity checking: we periodically check the heap XTree with
// ms_expensive_sanity_check.
static void sanity_check_XTree(XPt* xpt, XPt* parent)
{
   tl_assert(xpt != NULL);

   // Check back-pointer.
   tl_assert2(xpt->parent == parent,
      "xpt->parent = %p, parent = %p\n", xpt->parent, parent);

   // Check children counts look sane.
   tl_assert(xpt->n_children <= xpt->max_children);

   // Unfortunately, xpt's size is not necessarily equal to the sum of xpt's
   // children's sizes.  See comment at the bottom of get_XCon.
}

// Sanity checking: we check SXTrees (which are in snapshots) after
// snapshots are created, before they are deleted, and before they are
// printed.
static void sanity_check_SXTree(SXPt* sxpt)
{
   Int i;

   tl_assert(sxpt != NULL);

   // Check the sum of any children szBs equals the SXPt's szB.  Check the
   // children at the same time.
   switch (sxpt->tag) {
    case SigSXPt: {
      if (sxpt->Sig.n_children > 0) {
         for (i = 0; i < sxpt->Sig.n_children; i++) {
            sanity_check_SXTree(sxpt->Sig.children[i]);
         }
      }
      break;
    }
    case InsigSXPt:
      break;            // do nothing

    default: tl_assert2(0, "sanity_check_SXTree: unknown SXPt tag");
   }
}


//------------------------------------------------------------//
//--- XCon Operations                                      ---//
//------------------------------------------------------------//

// This is the limit on the number of removed alloc-fns that can be in a
// single XCon.
#define MAX_OVERESTIMATE   50
#define MAX_IPS            (MAX_DEPTH + MAX_OVERESTIMATE)

// This is used for various buffers which can hold function names/IP
// description.  Some C++ names can get really long so 1024 isn't big
// enough.
#define BUF_LEN   2048

// Determine if the given IP belongs to a function that should be ignored.
static Bool fn_should_be_ignored(Addr ip)
{
   static Char buf[BUF_LEN];
   return
      ( VG_(get_fnname)(ip, buf, BUF_LEN) && is_member_fn(ignore_fns, buf)
      ? True : False );
}

// Get the stack trace for an XCon, filtering out uninteresting entries:
// alloc-fns and entries above alloc-fns, and entries below
// main-or-below-main.
//   Eg:       alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
//   becomes:  a / b / main
// Nb: it's possible to end up with an empty trace, eg. if 'main' is marked
// as an alloc-fn.  This is ok.
static
Int get_IPs( ThreadId tid, Bool exclude_first_entry, Addr ips[])
{
   static Char buf[BUF_LEN];
   Int n_ips, i, n_alloc_fns_removed;
   Int overestimate;
   Bool redo;

   // We ask for a few more IPs than clo_depth suggests we need.  Then we
   // remove every entry that is an alloc-fn.  Depending on the
   // circumstances, we may need to redo it all, asking for more IPs.
   // Details:
   // - If the original stack trace is smaller than asked-for, redo=False
   // - Else if after filtering we have >= clo_depth IPs, redo=False
   // - Else redo=True
   // In other words, to redo, we'd have to get a stack trace as big as we
   // asked for and remove more than 'overestimate' alloc-fns.

   // Main loop.
   redo = True;      // Assume this to begin with.
   for (overestimate = 3; redo; overestimate += 6) {
      // This should never happen -- would require MAX_OVERESTIMATE
      // alloc-fns to be removed from the stack trace.
      if (overestimate > MAX_OVERESTIMATE)
         VG_(tool_panic)("get_IPs: ips[] too small, inc. MAX_OVERESTIMATE?");

      // Ask for more IPs than clo_depth suggests we need.
      n_ips = VG_(get_StackTrace)( tid, ips, clo_depth + overestimate,
                                   NULL/*array to dump SP values in*/,
                                   NULL/*array to dump FP values in*/,
                                   0/*first_ip_delta*/ );
      tl_assert(n_ips > 0);

      // If the original stack trace is smaller than asked-for, redo=False.
      if (n_ips < clo_depth + overestimate) { redo = False; }

      // Filter out alloc fns.  If requested, we automatically remove the
      // first entry (which presumably will be something like malloc or
      // __builtin_new that we're sure to filter out) without looking at it,
      // because VG_(get_fnname) is expensive.
      n_alloc_fns_removed = ( exclude_first_entry ? 1 : 0 );
      for (i = n_alloc_fns_removed; i < n_ips; i++) {
         if (VG_(get_fnname)(ips[i], buf, BUF_LEN)) {
            if (is_member_fn(alloc_fns, buf)) {
               n_alloc_fns_removed++;
            } else {
               break;
            }
         }
      }
      // Remove the alloc fns by shuffling the rest down over them.
      n_ips -= n_alloc_fns_removed;
      for (i = 0; i < n_ips; i++) {
         ips[i] = ips[i + n_alloc_fns_removed];
      }

      // If after filtering we have >= clo_depth IPs, redo=False
      if (n_ips >= clo_depth) {
         redo = False;
         n_ips = clo_depth;      // Ignore any IPs below --depth.
      }

      if (redo) {
         n_XCon_redos++;
      }
   }
   return n_ips;
}

// Gets an XCon and puts it in the tree.  Returns the XCon's bottom-XPt.
// Unless the allocation should be ignored, in which case we return NULL.
static XPt* get_XCon( ThreadId tid, Bool exclude_first_entry )
{
   static Addr ips[MAX_IPS];
   Int i;
   XPt* xpt = alloc_xpt;

   // After this call, the IPs we want are in ips[0]..ips[n_ips-1].
   Int n_ips = get_IPs(tid, exclude_first_entry, ips);

   // Should we ignore this allocation?  (Nb: n_ips can be zero, eg. if
   // 'main' is marked as an alloc-fn.)
   if (n_ips > 0 && fn_should_be_ignored(ips[0])) {
      return NULL;
   }

   // Now do the search/insertion of the XCon.
   for (i = 0; i < n_ips; i++) {
      Addr ip = ips[i];
      Int ch;
      // Look for IP in xpt's children.
      // XXX: linear search, ugh -- about 10% of time for konqueror startup.
      // XXX: tried caching last result, only hit about 4% for konqueror.
      // Nb:  this search hits about 98% of the time for konqueror.
      for (ch = 0; True; ch++) {
         if (ch == xpt->n_children) {
            // IP not found in the children.
            // Create and add new child XPt, then stop.
            XPt* new_child_xpt = new_XPt(ip, xpt);
            add_child_xpt(xpt, new_child_xpt);
            xpt = new_child_xpt;
            break;

         } else if (ip == xpt->children[ch]->ip) {
            // Found the IP in the children, stop.
            xpt = xpt->children[ch];
            break;
         }
      }
   }

   // [Note: several comments refer to this comment.  Do not delete it
   //  without updating them.]
   //
   // A complication... If all stack traces were well-formed, then the
   // returned xpt would always be a bottom-XPt.  As a consequence, an XPt's
   // size would always be equal to the sum of its children's sizes, which
   // is an excellent sanity check.
   //
   // Unfortunately, stack traces occasionally are malformed, ie. truncated.
   // This allows a stack trace to be a sub-trace of another, eg. a/b/c is a
   // sub-trace of a/b/c/d.  So we can't assume this xpt is a bottom-XPt;
   // nor can we sanity check an XPt's size against its children's sizes.
   // This is annoying, but must be dealt with.  (Older versions of Massif
   // had this assertion in, and it was reported to fail by real users a
   // couple of times.)  Even more annoyingly, I can't come up with a simple
   // test case that exhibits such a malformed stack trace, so I can't
   // regression test it.  Sigh.
   //
   // However, we can print a warning, so that if it happens (unexpectedly)
   // in existing regression tests we'll know.  Also, it warns users that
   // the output snapshots may not add up the way they might expect.
   //
   //tl_assert(0 == xpt->n_children); // Must be bottom-XPt
   if (0 != xpt->n_children) {
      static Int n_moans = 0;
      if (n_moans < 3) {
         VG_(umsg)(
            "Warning: Malformed stack trace detected.  In Massif's output,\n");
         VG_(umsg)(
            "         the size of an entry's child entries may not sum up\n");
         VG_(umsg)(
            "         to the entry's size as they normally do.\n");
         n_moans++;
         if (3 == n_moans)
            VG_(umsg)(
            "         (And Massif now won't warn about this again.)\n");
      }
   }
   return xpt;
}

// Update 'szB' of every XPt in the XCon, by percolating upwards.
static void update_XCon(XPt* xpt, SSizeT space_delta)
{
   tl_assert(clo_heap);
   tl_assert(NULL != xpt);

   if (0 == space_delta)
      return;

   while (xpt != alloc_xpt) {
      if (space_delta < 0) tl_assert(xpt->szB >= -space_delta);
      xpt->szB += space_delta;
      xpt = xpt->parent;
   }
   if (space_delta < 0) tl_assert(alloc_xpt->szB >= -space_delta);
   alloc_xpt->szB += space_delta;
}


//------------------------------------------------------------//
//--- Snapshots                                            ---//
//------------------------------------------------------------//

// Snapshots are done in a way so that we always have a reasonable number of
// them.  We start by taking them quickly.  Once we hit our limit, we cull
// some (eg. half), and start taking them more slowly.  Once we hit the
// limit again, we again cull and then take them even more slowly, and so
// on.
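//
// For example (illustrative): with --max-snapshots=100, the 100th snapshot
// triggers a cull that deletes half of the existing snapshots, and the
// minimum interval between subsequent snapshots grows to the smallest gap
// that survives the cull; so a run ends with between roughly 50 and 100
// snapshots no matter how long it executes.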

// Time is measured either in i or ms or bytes, depending on the --time-unit
// option.  It's a Long because it can exceed 32-bits reasonably easily, and
// because we need to allow negative values to represent unset times.
typedef Long Time;

#define UNUSED_SNAPSHOT_TIME  -333  // A conspicuous negative number.

typedef
   enum {
      Normal = 77,
      Peak,
      Unused
   }
   SnapshotKind;

typedef
   struct {
      SnapshotKind kind;
      Time  time;
      SizeT heap_szB;
      SizeT heap_extra_szB;// Heap slop + admin bytes.
      SizeT stacks_szB;
      SXPt* alloc_sxpt;    // Heap XTree root, if a detailed snapshot,
   }                       // otherwise NULL.
   Snapshot;

static UInt      next_snapshot_i = 0;  // Index of where next snapshot will go.
static Snapshot* snapshots;            // Array of snapshots.

static Bool is_snapshot_in_use(Snapshot* snapshot)
{
   if (Unused == snapshot->kind) {
      // If snapshot is unused, check all the fields are unset.
      tl_assert(snapshot->time           == UNUSED_SNAPSHOT_TIME);
      tl_assert(snapshot->heap_extra_szB == 0);
      tl_assert(snapshot->heap_szB       == 0);
      tl_assert(snapshot->stacks_szB     == 0);
      tl_assert(snapshot->alloc_sxpt     == NULL);
      return False;
   } else {
      tl_assert(snapshot->time           != UNUSED_SNAPSHOT_TIME);
      return True;
   }
}

static Bool is_detailed_snapshot(Snapshot* snapshot)
{
   return (snapshot->alloc_sxpt ? True : False);
}

static Bool is_uncullable_snapshot(Snapshot* snapshot)
{
   return &snapshots[0] == snapshot                 // First snapshot
       || &snapshots[next_snapshot_i-1] == snapshot // Last snapshot
       || snapshot->kind == Peak;                   // Peak snapshot
}

static void sanity_check_snapshot(Snapshot* snapshot)
{
   if (snapshot->alloc_sxpt) {
      sanity_check_SXTree(snapshot->alloc_sxpt);
   }
}

// All the used entries should look used, all the unused ones should be clear.
static void sanity_check_snapshots_array(void)
{
   Int i;
   for (i = 0; i < next_snapshot_i; i++) {
      tl_assert( is_snapshot_in_use( & snapshots[i] ));
   }
   for (    ; i < clo_max_snapshots; i++) {
      tl_assert(!is_snapshot_in_use( & snapshots[i] ));
   }
}

// This zeroes all the fields in the snapshot, but does not free the heap
// XTree if present.  It also does a sanity check unless asked not to;  we
// can't sanity check at startup when clearing the initial snapshots because
// they're full of junk.
static void clear_snapshot(Snapshot* snapshot, Bool do_sanity_check)
{
   if (do_sanity_check) sanity_check_snapshot(snapshot);
   snapshot->kind           = Unused;
   snapshot->time           = UNUSED_SNAPSHOT_TIME;
   snapshot->heap_extra_szB = 0;
   snapshot->heap_szB       = 0;
   snapshot->stacks_szB     = 0;
   snapshot->alloc_sxpt     = NULL;
}

// This zeroes all the fields in the snapshot, and frees the heap XTree if
// present.
static void delete_snapshot(Snapshot* snapshot)
{
   // Nb: if there's an XTree, we free it after calling clear_snapshot,
   // because clear_snapshot does a sanity check which includes checking the
   // XTree.
   SXPt* tmp_sxpt = snapshot->alloc_sxpt;
   clear_snapshot(snapshot, /*do_sanity_check*/True);
   if (tmp_sxpt) {
      free_SXTree(tmp_sxpt);
   }
}

static void VERB_snapshot(Int verbosity, Char* prefix, Int i)
{
   Snapshot* snapshot = &snapshots[i];
   Char* suffix;
   switch (snapshot->kind) {
    case Peak:   suffix = "p";                                            break;
    case Normal: suffix = ( is_detailed_snapshot(snapshot) ? "d" : "." ); break;
    case Unused: suffix = "u";                                            break;
    default:
      tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
   }
   VERB(verbosity, "%s S%s%3d (t:%lld, hp:%ld, ex:%ld, st:%ld)\n",
      prefix, suffix, i,
      snapshot->time,
      snapshot->heap_szB,
      snapshot->heap_extra_szB,
      snapshot->stacks_szB
   );
}

// Cull half the snapshots;  we choose those that represent the smallest
// time-spans, because that gives us the most even distribution of snapshots
// over time.  (It's possible to lose interesting spikes, however.)
//
// Algorithm for N snapshots:  We find the snapshot representing the smallest
// timeframe, and remove it.  We repeat this until (N/2) snapshots are gone.
// We have to do this one snapshot at a time, rather than finding the (N/2)
// smallest snapshots in one hit, because when a snapshot is removed, its
// neighbours immediately cover greater timespans.  So it's O(N^2), but N is
// small, and it's not done very often.
//
// Once we're done, we return the new smallest interval between snapshots.
// That becomes our minimum time interval.
static Time cull_snapshots(void)
{
   Int  i, jp, j, jn, min_timespan_i;
   Int  n_deleted = 0;
   Time min_timespan;

   n_cullings++;

   // Sets j to the index of the first not-yet-removed snapshot at or after i
   #define FIND_SNAPSHOT(i, j) \
      for (j = i; \
           j < clo_max_snapshots && !is_snapshot_in_use(&snapshots[j]); \
           j++) { }

   VERB(2, "Culling...\n");

   // First we remove enough snapshots by clearing them in-place.  Once
   // that's done, we can slide the remaining ones down.
   for (i = 0; i < clo_max_snapshots/2; i++) {
      // Find the snapshot representing the smallest timespan.  The timespan
      // for snapshot n = d(N-1,N)+d(N,N+1), where d(A,B) is the time between
      // snapshot A and B.  We don't consider the first and last snapshots
      // for removal.
      Snapshot* min_snapshot;
      Int min_j;

      // Initial triple: (prev, curr, next) == (jp, j, jn)
      // Initial min_timespan is the first one.
      jp = 0;
      FIND_SNAPSHOT(1,   j);
      FIND_SNAPSHOT(j+1, jn);
      min_timespan = 0x7fffffffffffffffLL;
      min_j        = -1;
      while (jn < clo_max_snapshots) {
         Time timespan = snapshots[jn].time - snapshots[jp].time;
         tl_assert(timespan >= 0);
         // Nb: We never cull the peak snapshot.
         if (Peak != snapshots[j].kind && timespan < min_timespan) {
            min_timespan = timespan;
            min_j        = j;
         }
         // Move on to next triple
         jp = j;
         j  = jn;
         FIND_SNAPSHOT(jn+1, jn);
      }
      // We've found the least important snapshot, now delete it.  First
      // print it if necessary.
      tl_assert(-1 != min_j);    // Check we found a minimum.
      min_snapshot = & snapshots[ min_j ];
      if (VG_(clo_verbosity) > 1) {
         Char buf[64];
         VG_(snprintf)(buf, 64, "  %3d (t-span = %lld)", i, min_timespan);
         VERB_snapshot(2, buf, min_j);
      }
      delete_snapshot(min_snapshot);
      n_deleted++;
   }

   // Slide down the remaining snapshots over the removed ones.  First set i
   // to point to the first empty slot, and j to the first full slot after
   // i.  Then slide everything down.
   for (i = 0;  is_snapshot_in_use( &snapshots[i] ); i++) { }
   for (j = i; !is_snapshot_in_use( &snapshots[j] ); j++) { }
   for (  ; j < clo_max_snapshots; j++) {
      if (is_snapshot_in_use( &snapshots[j] )) {
         snapshots[i++] = snapshots[j];
         clear_snapshot(&snapshots[j], /*do_sanity_check*/True);
      }
   }
   next_snapshot_i = i;

   // Check snapshots array looks ok after changes.
   sanity_check_snapshots_array();

   // Find the minimum timespan remaining;  that will be our new minimum
   // time interval.  Note that above we were finding timespans by measuring
   // two intervals around a snapshot that was under consideration for
   // deletion.  Here we only measure single intervals because all the
   // deletions have occurred.
   //
   // But we have to be careful -- some snapshots (eg. snapshot 0, and the
   // peak snapshot) are uncullable.  If two uncullable snapshots end up
   // next to each other, they'll never be culled (assuming the peak doesn't
   // change), and the time gap between them will not change.  However, the
   // time between the remaining cullable snapshots will grow ever larger.
   // This means that the min_timespan found will always be that between the
   // two uncullable snapshots, and it will be much smaller than it should
   // be.  To avoid this problem, when computing the minimum timespan, we
   // ignore any timespans between two uncullable snapshots.
   tl_assert(next_snapshot_i > 1);
   min_timespan = 0x7fffffffffffffffLL;
   min_timespan_i = -1;
   for (i = 1; i < next_snapshot_i; i++) {
      if (is_uncullable_snapshot(&snapshots[i]) &&
          is_uncullable_snapshot(&snapshots[i-1]))
      {
         VERB(2, "(Ignoring interval %d--%d when computing minimum)\n", i-1, i);
      } else {
         Time timespan = snapshots[i].time - snapshots[i-1].time;
         tl_assert(timespan >= 0);
         if (timespan < min_timespan) {
            min_timespan = timespan;
            min_timespan_i = i;
         }
      }
   }
   tl_assert(-1 != min_timespan_i);    // Check we found a minimum.

   // Print remaining snapshots, if necessary.
   if (VG_(clo_verbosity) > 1) {
      VERB(2, "Finished culling (%3d of %3d deleted)\n",
         n_deleted, clo_max_snapshots);
      for (i = 0; i < next_snapshot_i; i++) {
         VERB_snapshot(2, "  post-cull", i);
      }
      VERB(2, "New time interval = %lld (between snapshots %d and %d)\n",
         min_timespan, min_timespan_i-1, min_timespan_i);
   }

   return min_timespan;
}

static Time get_time(void)
{
   // Get current time, in whatever time unit we're using.
   if (clo_time_unit == TimeI) {
      return guest_instrs_executed;
   } else if (clo_time_unit == TimeMS) {
      // Some stuff happens between the millisecond timer being initialised
      // to zero and us taking our first snapshot.  We determine that time
      // gap so we can subtract it from all subsequent times so that our
      // first snapshot is considered to be at t = 0ms.  Unfortunately, a
      // bunch of symbols get read after the first snapshot is taken but
      // before the second one (which is triggered by the first allocation),
      // so when the time-unit is 'ms' we always have a big gap between the
      // first two snapshots.  But at least users won't have to wonder why
      // the first snapshot isn't at t=0.
      static Bool is_first_get_time = True;
      static Time start_time_ms;
      if (is_first_get_time) {
         start_time_ms = VG_(read_millisecond_timer)();
         is_first_get_time = False;
         return 0;
      } else {
         return VG_(read_millisecond_timer)() - start_time_ms;
      }
   } else if (clo_time_unit == TimeB) {
      return total_allocs_deallocs_szB;
   } else {
      tl_assert2(0, "bad --time-unit value");
   }
}

// Take a snapshot, and only that -- decisions on whether to take a
// snapshot, or what kind of snapshot, are made elsewhere.
// Nb: we call the arg "my_time" because "time" shadows a global declaration
// in /usr/include/time.h on Darwin.
static void
take_snapshot(Snapshot* snapshot, SnapshotKind kind, Time my_time,
              Bool is_detailed)
{
   tl_assert(!is_snapshot_in_use(snapshot));
   if (!clo_pages_as_heap) {
      tl_assert(have_started_executing_code);
   }

   // Heap and heap admin.
   if (clo_heap) {
      snapshot->heap_szB = heap_szB;
      if (is_detailed) {
         SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
         snapshot->alloc_sxpt = dup_XTree(alloc_xpt, total_szB);
         tl_assert(           alloc_xpt->szB == heap_szB);
         tl_assert(snapshot->alloc_sxpt->szB == heap_szB);
      }
      snapshot->heap_extra_szB = heap_extra_szB;
   }

   // Stack(s).
   if (clo_stacks) {
      snapshot->stacks_szB = stacks_szB;
   }

   // Rest of snapshot.
   snapshot->kind = kind;
   snapshot->time = my_time;
   sanity_check_snapshot(snapshot);

   // Update stats.
   if (Peak == kind) n_peak_snapshots++;
   if (is_detailed)  n_detailed_snapshots++;
   n_real_snapshots++;
}


// Take a snapshot, if it's time, or if we've hit a peak.
static void
maybe_take_snapshot(SnapshotKind kind, Char* what)
{
   // 'min_time_interval' is the minimum time interval between snapshots.
   // If we try to take a snapshot and less than this much time has passed,
   // we don't take it.  It gets larger as the program runs longer.  It's
   // initialised to zero so that we begin by taking snapshots as quickly as
   // possible.
   static Time min_time_interval = 0;
   // Zero allows startup snapshot.
   static Time earliest_possible_time_of_next_snapshot = 0;
   static Int  n_snapshots_since_last_detailed         = 0;
   static Int  n_skipped_snapshots_since_last_snapshot = 0;

   Snapshot* snapshot;
   Bool      is_detailed;
   // Nb: we call this variable "my_time" because "time" shadows a global
   // declaration in /usr/include/time.h on Darwin.
   Time      my_time = get_time();

   switch (kind) {
    case Normal:
      // Only do a snapshot if it's time.
      if (my_time < earliest_possible_time_of_next_snapshot) {
         n_skipped_snapshots++;
         n_skipped_snapshots_since_last_snapshot++;
         return;
      }
      is_detailed = (clo_detailed_freq-1 == n_snapshots_since_last_detailed);
      break;

    case Peak: {
      // Because we're about to do a deallocation, we're coming down from a
      // local peak.  If it is (a) actually a global peak, and (b) a certain
      // amount bigger than the previous peak, then we take a peak snapshot.
      // By not taking a snapshot for every peak, we save a lot of effort --
      // because many peaks remain peak only for a short time.
      SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
      SizeT excess_szB_for_new_peak =
         (SizeT)((peak_snapshot_total_szB * clo_peak_inaccuracy) / 100);
      if (total_szB <= peak_snapshot_total_szB + excess_szB_for_new_peak) {
         return;
      }
      is_detailed = True;
      break;
    }

    default:
      tl_assert2(0, "maybe_take_snapshot: unrecognised snapshot kind");
   }

   // Take the snapshot.
   snapshot = & snapshots[next_snapshot_i];
   take_snapshot(snapshot, kind, my_time, is_detailed);

   // Record if it was detailed.
   if (is_detailed) {
      n_snapshots_since_last_detailed = 0;
   } else {
      n_snapshots_since_last_detailed++;
   }

   // Update peak data, if it's a Peak snapshot.
   if (Peak == kind) {
      Int i, number_of_peaks_snapshots_found = 0;

      // Sanity check the size, then update our recorded peak.
      SizeT snapshot_total_szB =
         snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
      tl_assert2(snapshot_total_szB > peak_snapshot_total_szB,
         "%ld, %ld\n", snapshot_total_szB, peak_snapshot_total_szB);
      peak_snapshot_total_szB = snapshot_total_szB;

      // Find the old peak snapshot, if it exists, and mark it as normal.
      for (i = 0; i < next_snapshot_i; i++) {
         if (Peak == snapshots[i].kind) {
            snapshots[i].kind = Normal;
            number_of_peaks_snapshots_found++;
         }
      }
      tl_assert(number_of_peaks_snapshots_found <= 1);
   }

   // Finish up verbosity and stats stuff.
   if (n_skipped_snapshots_since_last_snapshot > 0) {
      VERB(2, "  (skipped %d snapshot%s)\n",
         n_skipped_snapshots_since_last_snapshot,
         ( 1 == n_skipped_snapshots_since_last_snapshot ? "" : "s") );
   }
   VERB_snapshot(2, what, next_snapshot_i);
   n_skipped_snapshots_since_last_snapshot = 0;

   // Cull the entries, if our snapshot table is full.
   next_snapshot_i++;
   if (clo_max_snapshots == next_snapshot_i) {
      min_time_interval = cull_snapshots();
   }

   // Work out the earliest time when the next snapshot can happen.
   earliest_possible_time_of_next_snapshot = my_time + min_time_interval;
}


//------------------------------------------------------------//
//--- Sanity checking                                      ---//
//------------------------------------------------------------//

static Bool ms_cheap_sanity_check ( void )
{
   return True;   // Nothing useful we can cheaply check.
}

static Bool ms_expensive_sanity_check ( void )
{
   sanity_check_XTree(alloc_xpt, /*parent*/NULL);
   sanity_check_snapshots_array();
   return True;
}
1498
1499
1500 //------------------------------------------------------------//
1501 //--- Heap management ---//
1502 //------------------------------------------------------------//
1503
1504 // Metadata for heap blocks. Each one contains a pointer to a bottom-XPt,
1505 // which is a foothold into the XCon at which it was allocated. From
1506 // HP_Chunks, XPt 'space' fields are incremented (at allocation) and
1507 // decremented (at deallocation).
1508 //
1509 // Nb: first two fields must match core's VgHashNode.
1510 typedef
1511 struct _HP_Chunk {
1512 struct _HP_Chunk* next;
1513 Addr data; // Ptr to actual block
1514 SizeT req_szB; // Size requested
1515 SizeT slop_szB; // Extra bytes given above those requested
1516 XPt* where; // Where allocated; bottom-XPt
1517 }
1518 HP_Chunk;
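
// For example (illustrative numbers): for a client request of 100 bytes
// where the allocator returns a block whose usable size is 104 bytes, the
// chunk records req_szB = 100 and slop_szB = 4.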

static VgHashTable malloc_list  = NULL;   // HP_Chunks

static void update_alloc_stats(SSizeT szB_delta)
{
   // Update total_allocs_deallocs_szB.
   if (szB_delta < 0) szB_delta = -szB_delta;
   total_allocs_deallocs_szB += szB_delta;
}

static void update_heap_stats(SSizeT heap_szB_delta, Int heap_extra_szB_delta)
{
   if (heap_szB_delta < 0)
      tl_assert(heap_szB >= -heap_szB_delta);
   if (heap_extra_szB_delta < 0)
      tl_assert(heap_extra_szB >= -heap_extra_szB_delta);

   heap_extra_szB += heap_extra_szB_delta;
   heap_szB       += heap_szB_delta;

   update_alloc_stats(heap_szB_delta + heap_extra_szB_delta);
}

static
void* record_block( ThreadId tid, void* p, SizeT req_szB, SizeT slop_szB,
                    Bool exclude_first_entry, Bool maybe_snapshot )
{
   // Make new HP_Chunk node, add to malloc_list
   HP_Chunk* hc = VG_(malloc)("ms.main.rb.1", sizeof(HP_Chunk));
   hc->req_szB  = req_szB;
   hc->slop_szB = slop_szB;
   hc->data     = (Addr)p;
   hc->where    = NULL;
   VG_(HT_add_node)(malloc_list, hc);

   if (clo_heap) {
      VERB(3, "<<< record_block (%lu, %lu)\n", req_szB, slop_szB);

      hc->where = get_XCon( tid, exclude_first_entry );

      if (hc->where) {
         // Update statistics.
         n_heap_allocs++;

         // Update heap stats.
         update_heap_stats(req_szB, clo_heap_admin + slop_szB);

         // Update XTree.
         update_XCon(hc->where, req_szB);

         // Maybe take a snapshot.
         if (maybe_snapshot) {
            maybe_take_snapshot(Normal, "  alloc");
         }

      } else {
         // Ignored allocation.
         n_ignored_heap_allocs++;

         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>>\n");
   }

   return p;
}

static __inline__
void* alloc_and_record_block ( ThreadId tid, SizeT req_szB, SizeT req_alignB,
                               Bool is_zeroed )
{
   SizeT actual_szB, slop_szB;
   void* p;

   if ((SSizeT)req_szB < 0) return NULL;

   // Allocate and zero if necessary.
   p = VG_(cli_malloc)( req_alignB, req_szB );
   if (!p) {
      return NULL;
   }
   if (is_zeroed) VG_(memset)(p, 0, req_szB);
   actual_szB = VG_(malloc_usable_size)(p);
   tl_assert(actual_szB >= req_szB);
   slop_szB = actual_szB - req_szB;

   // Record block.
   record_block(tid, p, req_szB, slop_szB, /*exclude_first_entry*/True,
                /*maybe_snapshot*/True);

   return p;
}

static __inline__
void unrecord_block ( void* p, Bool maybe_snapshot )
{
   // Remove HP_Chunk from malloc_list
   HP_Chunk* hc = VG_(HT_remove)(malloc_list, (UWord)p);
   if (NULL == hc) {
      return;   // must have been a bogus free()
   }

   if (clo_heap) {
      VERB(3, "<<< unrecord_block\n");

      if (hc->where) {
         // Update statistics.
         n_heap_frees++;

         // Maybe take a peak snapshot, since it's a deallocation.
         if (maybe_snapshot) {
            maybe_take_snapshot(Peak, "de-PEAK");
         }

         // Update heap stats.
         update_heap_stats(-hc->req_szB, -clo_heap_admin - hc->slop_szB);

         // Update XTree.
         update_XCon(hc->where, -hc->req_szB);

         // Maybe take a snapshot.
         if (maybe_snapshot) {
            maybe_take_snapshot(Normal, "dealloc");
         }

      } else {
         n_ignored_heap_frees++;

         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>> (-%lu, -%lu)\n", hc->req_szB, hc->slop_szB);
   }

   // Actually free the chunk, and the heap block (if necessary)
   VG_(free)( hc );  hc = NULL;
}

// Nb: --ignore-fn is tricky for realloc.  If the block's original alloc was
// ignored, but the realloc is not requested to be ignored, and we are
// shrinking the block, then we have to ignore the realloc -- otherwise we
// could end up with negative heap sizes.  This isn't a danger if we are
// growing such a block, but for consistency (it also simplifies things) we
// ignore such reallocs as well.
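//
// For example (a hypothetical sequence; sizes illustrative):
//
//    p = malloc(100);     // hits an --ignore-fn: these 100 bytes are never
//                         // added to the heap totals
//    p = realloc(p, 50);  // if recorded, this would subtract 100 bytes and
//                         // add 50 -- a net -50 for a block that contributed
//                         // 0, which is how a negative size could arise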
static __inline__
void* realloc_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
{
   HP_Chunk* hc;
   void*     p_new;
   SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
   XPt      *old_where, *new_where;
   Bool      is_ignored = False;

   // Remove the old block
   hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
   if (hc == NULL) {
      return NULL;   // must have been a bogus realloc()
   }

   old_req_szB  = hc->req_szB;
   old_slop_szB = hc->slop_szB;

   tl_assert(!clo_pages_as_heap);   // Shouldn't be here if --pages-as-heap=yes.
   if (clo_heap) {
      VERB(3, "<<< realloc_block (%lu)\n", new_req_szB);

      if (hc->where) {
         // Update statistics.
         n_heap_reallocs++;

         // Maybe take a peak snapshot, if it's (effectively) a deallocation.
         if (new_req_szB < old_req_szB) {
            maybe_take_snapshot(Peak, "re-PEAK");
         }
      } else {
         // The original malloc was ignored, so we have to ignore the
         // realloc as well.
         is_ignored = True;
      }
   }

   // Actually do the allocation, if necessary.
   if (new_req_szB <= old_req_szB + old_slop_szB) {
      // New size is smaller or same;  block not moved.
      p_new = p_old;
      new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);
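      // E.g. (illustrative numbers): shrinking a 100-byte request that had
      // 4 bytes of slop down to 60 bytes keeps the block in place, with
      // new_slop_szB = 4 + (100 - 60) = 44.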

   } else {
      // New size is bigger;  make new block, copy shared contents, free old.
      p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
      if (!p_new) {
         // Nb: if realloc fails, NULL is returned but the old block is not
         // touched.  What an awful function.
         return NULL;
      }
      VG_(memcpy)(p_new, p_old, old_req_szB);
      VG_(cli_free)(p_old);
      new_actual_szB = VG_(malloc_usable_size)(p_new);
      tl_assert(new_actual_szB >= new_req_szB);
      new_slop_szB = new_actual_szB - new_req_szB;
   }

   if (p_new) {
      // Update HP_Chunk.
      hc->data     = (Addr)p_new;
      hc->req_szB  = new_req_szB;
      hc->slop_szB = new_slop_szB;
      old_where    = hc->where;
      hc->where    = NULL;

      // Update XTree.
      if (clo_heap) {
         new_where = get_XCon( tid, /*exclude_first_entry*/True);
         if (!is_ignored && new_where) {
            hc->where = new_where;
            update_XCon(old_where, -old_req_szB);
            update_XCon(new_where,  new_req_szB);
         } else {
            // The realloc itself is ignored.
            is_ignored = True;

            // Update statistics.
            n_ignored_heap_reallocs++;
         }
      }
   }

   // Now insert the new hc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added hc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)(malloc_list, hc);

   if (clo_heap) {
      if (!is_ignored) {
         // Update heap stats.
         update_heap_stats(new_req_szB - old_req_szB,
                           new_slop_szB - old_slop_szB);

         // Maybe take a snapshot.
         maybe_take_snapshot(Normal, "realloc");
      } else {

         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>> (%ld, %ld)\n",
           new_req_szB - old_req_szB, new_slop_szB - old_slop_szB);
   }

   return p_new;
}


//------------------------------------------------------------//
//--- malloc() et al replacement wrappers                  ---//
//------------------------------------------------------------//

static void* ms_malloc ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}

static void* ms___builtin_new ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}

static void* ms___builtin_vec_new ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}

static void* ms_calloc ( ThreadId tid, SizeT m, SizeT szB )
{
   return alloc_and_record_block( tid, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
}

static void *ms_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
{
   return alloc_and_record_block( tid, szB, alignB, /*is_zeroed*/False );
}

static void ms_free ( ThreadId tid __attribute__((unused)), void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
   VG_(cli_free)(p);
}

static void ms___builtin_delete ( ThreadId tid, void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
   VG_(cli_free)(p);
}

static void ms___builtin_vec_delete ( ThreadId tid, void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
   VG_(cli_free)(p);
}

static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_szB )
{
   return realloc_block(tid, p_old, new_szB);
}

static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
{
   HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );

   return ( hc ? hc->req_szB + hc->slop_szB : 0 );
}

//------------------------------------------------------------//
//--- Page handling                                        ---//
//------------------------------------------------------------//

static
void ms_record_page_mem ( Addr a, SizeT len )
{
   ThreadId tid = VG_(get_running_tid)();
   Addr end;
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   tl_assert(len >= VKI_PAGE_SIZE);
   // Record the first N-1 pages as blocks, but don't do any snapshots.
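   // For example (illustrative): a 3-page mapping with 4 KB pages is
   // recorded as three 4096-byte blocks, one per page; only the last one,
   // recorded below, can trigger a snapshot.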
   for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
      record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
                    /*exclude_first_entry*/False, /*maybe_snapshot*/False );
   }
   // Record the last page as a block, and maybe do a snapshot afterwards.
   record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
                 /*exclude_first_entry*/False, /*maybe_snapshot*/True );
}

static
void ms_unrecord_page_mem( Addr a, SizeT len )
{
   Addr end;
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   tl_assert(len >= VKI_PAGE_SIZE);
   for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
      unrecord_block((void*)a, /*maybe_snapshot*/False);
   }
   unrecord_block((void*)a, /*maybe_snapshot*/True);
}

//------------------------------------------------------------//

static
void ms_new_mem_mmap ( Addr a, SizeT len,
                       Bool rr, Bool ww, Bool xx, ULong di_handle )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_record_page_mem(a, len);
}

static
void ms_new_mem_startup( Addr a, SizeT len,
                         Bool rr, Bool ww, Bool xx, ULong di_handle )
{
   // Startup maps are always page-sized, except that the trampoline page
   // is marked by the core as only being the size of the trampoline itself,
   // which is something like 57 bytes.  Round it up to page size.
   len = VG_PGROUNDUP(len);
   ms_record_page_mem(a, len);
}

static
void ms_new_mem_brk ( Addr a, SizeT len, ThreadId tid )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_record_page_mem(a, len);
}

static
void ms_copy_mem_remap( Addr from, Addr to, SizeT len)
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(from, len);
   ms_record_page_mem(to, len);
}

static
void ms_die_mem_munmap( Addr a, SizeT len )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(a, len);
}

static
void ms_die_mem_brk( Addr a, SizeT len )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(a, len);
}

//------------------------------------------------------------//
//--- Stacks                                               ---//
//------------------------------------------------------------//

// We really want the inlining to occur...
#define INLINE    inline __attribute__((always_inline))

static void update_stack_stats(SSizeT stack_szB_delta)
{
   if (stack_szB_delta < 0) tl_assert(stacks_szB >= -stack_szB_delta);
   stacks_szB += stack_szB_delta;

   update_alloc_stats(stack_szB_delta);
}

static INLINE void new_mem_stack_2(SizeT len, Char* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< new_mem_stack (%ld)\n", len);
      n_stack_allocs++;
      update_stack_stats(len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}

static INLINE void die_mem_stack_2(SizeT len, Char* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< die_mem_stack (%ld)\n", -len);
      n_stack_frees++;
      maybe_take_snapshot(Peak, "stkPEAK");
      update_stack_stats(-len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}

static void new_mem_stack(Addr a, SizeT len)
{
   new_mem_stack_2(len, "stk-new");
}

static void die_mem_stack(Addr a, SizeT len)
{
   die_mem_stack_2(len, "stk-die");
}

static void new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
{
   new_mem_stack_2(len, "sig-new");
}

static void die_mem_stack_signal(Addr a, SizeT len)
{
   die_mem_stack_2(len, "sig-die");
}


//------------------------------------------------------------//
//--- Client Requests                                      ---//
//------------------------------------------------------------//

static Bool ms_handle_client_request ( ThreadId tid, UWord* argv, UWord* ret )
{
   switch (argv[0]) {
      case VG_USERREQ__MALLOCLIKE_BLOCK: {
         void* p   = (void*)argv[1];
         SizeT szB =        argv[2];
         record_block( tid, p, szB, /*slop_szB*/0, /*exclude_first_entry*/False,
                       /*maybe_snapshot*/True );
         *ret = 0;
         return True;
      }
      case VG_USERREQ__FREELIKE_BLOCK: {
         void* p = (void*)argv[1];
         unrecord_block(p, /*maybe_snapshot*/True);
         *ret = 0;
         return True;
      }
      default:
         *ret = 0;
         return False;
   }
}

//------------------------------------------------------------//
//--- Instrumentation                                      ---//
//------------------------------------------------------------//

static void add_counter_update(IRSB* sbOut, Int n)
{
   #if defined(VG_BIGENDIAN)
   # define END Iend_BE
   #elif defined(VG_LITTLEENDIAN)
   # define END Iend_LE
   #else
   # error "Unknown endianness"
   #endif
   // Add code to increment 'guest_instrs_executed' by 'n', like this:
   //   WrTmp(t1, Load64(&guest_instrs_executed))
   //   WrTmp(t2, Add64(RdTmp(t1), Const(n)))
   //   Store(&guest_instrs_executed, t2)
   IRTemp t1 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );

   IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
   IRStmt* st2 =
      IRStmt_WrTmp(t2,
                   IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
                                           IRExpr_Const(IRConst_U64(n))));
   IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));

   addStmtToIRSB( sbOut, st1 );
   addStmtToIRSB( sbOut, st2 );
   addStmtToIRSB( sbOut, st3 );
}

static IRSB* ms_instrument2( IRSB* sbIn )
{
   Int   i, n = 0;
   IRSB* sbOut;

   // We increment the instruction count in two places:
   // - just before any Ist_Exit statements;
   // - just before the IRSB's end.
   // In the former case, we zero 'n' and then continue instrumenting.
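   //
   // For example (schematic): for a superblock whose statements are
   //    IMark IMark Exit IMark IMark IMark <end>
   // we add an increment of 2 before the Exit and an increment of 3
   // before the superblock's end.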

   sbOut = deepCopyIRSBExceptStmts(sbIn);

   for (i = 0; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];

      if (!st || st->tag == Ist_NoOp) continue;

      if (st->tag == Ist_IMark) {
         n++;
      } else if (st->tag == Ist_Exit) {
         if (n > 0) {
            // Add an increment before the Exit statement, then reset 'n'.
            add_counter_update(sbOut, n);
            n = 0;
         }
      }
      addStmtToIRSB( sbOut, st );
   }

   if (n > 0) {
      // Add an increment before the SB end.
      add_counter_update(sbOut, n);
   }
   return sbOut;
}

static
IRSB* ms_instrument ( VgCallbackClosure* closure,
                      IRSB* sbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   if (! have_started_executing_code) {
      // Do an initial sample to guarantee that we have at least one.
      // We use 'maybe_take_snapshot' instead of 'take_snapshot' to ensure
      // 'maybe_take_snapshot's internal static variables are initialised.
      have_started_executing_code = True;
      maybe_take_snapshot(Normal, "startup");
   }

   if      (clo_time_unit == TimeI)  { return ms_instrument2(sbIn); }
   else if (clo_time_unit == TimeMS) { return sbIn; }
   else if (clo_time_unit == TimeB)  { return sbIn; }
   else    { tl_assert2(0, "bad --time-unit value"); }
}


//------------------------------------------------------------//
//--- Writing snapshots                                    ---//
//------------------------------------------------------------//

Char FP_buf[BUF_LEN];

// XXX: implement f{,n}printf in m_libcprint.c eventually, and use it here.
// Then change Cachegrind to use it too.
#define FP(format, args...) ({ \
   VG_(snprintf)(FP_buf, BUF_LEN, format, ##args); \
   FP_buf[BUF_LEN-1] = '\0';  /* Make sure the string is terminated. */ \
   VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf)); \
})
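
// FP writes one formatted string to the file descriptor 'fd' that must be
// in scope at the use site, eg.:
//    FP("time=%lld\n", snapshot->time);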

// Nb: uses a static buffer, each call trashes the last string returned.
static Char* make_perc(double x)
{
   static Char mbuf[32];

   VG_(percentify)((ULong)(x * 100), 10000, 2, 6, mbuf);
   // XXX: this is bogus if the denominator was zero -- resulting string is
   // something like "0 --%")
   if (' ' == mbuf[0]) mbuf[0] = '0';
   return mbuf;
}

static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
                             Int depth_str_len,
                             SizeT snapshot_heap_szB, SizeT snapshot_total_szB)
{
   Int   i, j, n_insig_children_sxpts;
   SXPt* child = NULL;

   // Used for printing function names.  Is made static to keep it out
   // of the stack frame -- this function is recursive.  Obviously this
   // now means its contents are trashed across the recursive call.
   static Char ip_desc_array[BUF_LEN];
   Char* ip_desc = ip_desc_array;

   switch (sxpt->tag) {
      case SigSXPt:
         // Print the SXPt itself.
         if (0 == depth) {
            if (clo_heap) {
               ip_desc =
                  ( clo_pages_as_heap
                  ? "(page allocation syscalls) mmap/mremap/brk, --alloc-fns, etc."
                  : "(heap allocation functions) malloc/new/new[], --alloc-fns, etc."
                  );
            } else {
               // XXX: --alloc-fns?
            }
         } else {
            // If it's main-or-below-main, we (if appropriate) ignore everything
            // below it by pretending it has no children.
            if ( ! VG_(clo_show_below_main) ) {
               Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(sxpt->Sig.ip);
               if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind) {
                  sxpt->Sig.n_children = 0;
               }
            }

            // We need the -1 to get the line number right, but I'm not sure why.
            ip_desc = VG_(describe_IP)(sxpt->Sig.ip-1, ip_desc, BUF_LEN);
         }

         // Do the non-ip_desc part first...
         FP("%sn%d: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);

         // For ip_descs beginning with "0xABCD...:" addresses, we first
         // measure the length of the "0xabcd: " address at the start of the
         // ip_desc.
         j = 0;
         if ('0' == ip_desc[0] && 'x' == ip_desc[1]) {
            j = 2;
            while (True) {
               if (ip_desc[j]) {
                  if (':' == ip_desc[j]) break;
                  j++;
               } else {
                  tl_assert2(0, "ip_desc has unexpected form: %s\n", ip_desc);
               }
            }
         }
         // Nb: We treat this specially (ie. we don't use FP) so that if the
         // ip_desc is too long (eg. due to a long C++ function name), it'll
         // get truncated, but the '\n' is still there so it's a valid file.
         // (At one point we were truncating without adding the '\n', which
         // caused bug #155929.)
         //
         // Also, we account for the length of the address in ip_desc when
         // truncating.  (The longest address we could have is 18 chars:  "0x"
         // plus 16 address digits.)  This ensures that the truncated function
         // name always has the same length, which makes truncation
         // deterministic and thus makes testing easier.
         tl_assert(j <= 18);
         VG_(snprintf)(FP_buf, BUF_LEN, "%s\n", ip_desc);
         FP_buf[BUF_LEN-18+j-5] = '.';    // "..." at the end makes the
         FP_buf[BUF_LEN-18+j-4] = '.';    //   truncation more obvious.
         FP_buf[BUF_LEN-18+j-3] = '.';
         FP_buf[BUF_LEN-18+j-2] = '\n';   // The last char is '\n'.
         FP_buf[BUF_LEN-18+j-1] = '\0';   // The string is terminated.
         VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf));

         // Indent.
         tl_assert(depth+1 < depth_str_len-1);   // -1 for end NUL char
         depth_str[depth+0] = ' ';
         depth_str[depth+1] = '\0';

         // Sort SXPt's children by szB (reverse order:  biggest to smallest).
         // Nb: we sort them here, rather than earlier (eg. in dup_XTree), for
         // two reasons.  First, if we do it during dup_XTree, it can get
         // expensive (eg. 15% of execution time for konqueror
         // startup/shutdown).  Second, this way we get the Insig SXPt (if one
         // is present) in its sorted position, not at the end.
         VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children, sizeof(SXPt*),
                    SXPt_revcmp_szB);

         // Print the SXPt's children.  They should already be in sorted order.
         n_insig_children_sxpts = 0;
         for (i = 0; i < sxpt->Sig.n_children; i++) {
            child = sxpt->Sig.children[i];

            if (InsigSXPt == child->tag)
               n_insig_children_sxpts++;

            // Ok, print the child.  NB: contents of ip_desc_array will be
            // trashed by this recursive call.  Doesn't matter currently,
            // but worth noting.
            pp_snapshot_SXPt(fd, child, depth+1, depth_str, depth_str_len,
                             snapshot_heap_szB, snapshot_total_szB);
         }

         // Unindent.
         depth_str[depth+0] = '\0';
         depth_str[depth+1] = '\0';

         // There should be 0 or 1 Insig children SXPts.
         tl_assert(n_insig_children_sxpts <= 1);
         break;

      case InsigSXPt: {
         Char* s = ( 1 == sxpt->Insig.n_xpts ? "," : "s, all" );
         FP("%sn0: %lu in %d place%s below massif's threshold (%s)\n",
            depth_str, sxpt->szB, sxpt->Insig.n_xpts, s,
            make_perc(clo_threshold));
         break;
      }

      default:
         tl_assert2(0, "pp_snapshot_SXPt: unrecognised SXPt tag");
   }
}

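// Each snapshot is written in the following form (values illustrative).
// For detailed snapshots the final line is "heap_tree=detailed" (or
// "heap_tree=peak"), followed by the SXPt tree from pp_snapshot_SXPt:
//
//    #-----------
//    snapshot=3
//    #-----------
//    time=1000000
//    mem_heap_B=12345
//    mem_heap_extra_B=789
//    mem_stacks_B=0
//    heap_tree=empty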
static void pp_snapshot(Int fd, Snapshot* snapshot, Int snapshot_n)
{
   sanity_check_snapshot(snapshot);

   FP("#-----------\n");
   FP("snapshot=%d\n", snapshot_n);
   FP("#-----------\n");
   FP("time=%lld\n",            snapshot->time);
   FP("mem_heap_B=%lu\n",       snapshot->heap_szB);
   FP("mem_heap_extra_B=%lu\n", snapshot->heap_extra_szB);
   FP("mem_stacks_B=%lu\n",     snapshot->stacks_szB);

   if (is_detailed_snapshot(snapshot)) {
      // Detailed snapshot -- print heap tree.
      Int   depth_str_len = clo_depth + 3;
      Char* depth_str = VG_(malloc)("ms.main.pps.1",
                                    sizeof(Char) * depth_str_len);
      SizeT snapshot_total_szB =
         snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
      depth_str[0] = '\0';   // Initialise depth_str to "".

      FP("heap_tree=%s\n", ( Peak == snapshot->kind ? "peak" : "detailed" ));
      pp_snapshot_SXPt(fd, snapshot->alloc_sxpt, 0, depth_str,
                       depth_str_len, snapshot->heap_szB,
                       snapshot_total_szB);

      VG_(free)(depth_str);

   } else {
      FP("heap_tree=empty\n");
   }
}

static void write_snapshots_to_file(void)
{
   Int i, fd;
   SysRes sres;

   // Setup output filename.  Nb: it's important to do this now, ie. as late
   // as possible.  If we do it at start-up and the program forks and the
   // output file format string contains a %p (pid) specifier, both the
   // parent and child will incorrectly write to the same file;  this
   // happened in 3.3.0.
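   //
   // For example (hypothetical pids): with --massif-out-file=massif.out.%p,
   // expanding at start-up would leave a parent with pid 1234 and any later
   // forked child both writing to massif.out.1234; expanding here gives the
   // child its own massif.out.<child-pid>.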
   Char* massif_out_file =
      VG_(expand_file_name)("--massif-out-file", clo_massif_out_file);

   sres = VG_(open)(massif_out_file, VKI_O_CREAT|VKI_O_TRUNC|VKI_O_WRONLY,
                                     VKI_S_IRUSR|VKI_S_IWUSR);
   if (sr_isError(sres)) {
      // If the file can't be opened for whatever reason (conflict
      // between multiple Massif processes?), give up now.
      VG_(umsg)("error: can't open output file '%s'\n", massif_out_file );
      VG_(umsg)("       ... so profiling results will be missing.\n");
      VG_(free)(massif_out_file);
      return;
   } else {
      fd = sr_Res(sres);
      VG_(free)(massif_out_file);
   }

   // Print massif-specific options that were used.
   // XXX: is it worth having a "desc:" line?  Could just call it "options:"
   // -- this file format isn't as generic as Cachegrind's, so the
   // implied genericity of "desc:" is bogus.
   FP("desc:");
   for (i = 0; i < VG_(sizeXA)(args_for_massif); i++) {
      Char* arg = *(Char**)VG_(indexXA)(args_for_massif, i);
      FP(" %s", arg);
   }
   if (0 == i) FP(" (none)");
   FP("\n");

   // Print "cmd:" line.
   FP("cmd: ");
   if (VG_(args_the_exename)) {
      FP("%s", VG_(args_the_exename));
      for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
         HChar* arg = * (HChar**) VG_(indexXA)( VG_(args_for_client), i );
         if (arg)
            FP(" %s", arg);
      }
   } else {
      FP(" ???");
   }
   FP("\n");

   FP("time_unit: %s\n", TimeUnit_to_string(clo_time_unit));

   for (i = 0; i < next_snapshot_i; i++) {
      Snapshot* snapshot = & snapshots[i];
      pp_snapshot(fd, snapshot, i);     // Detailed snapshot!
   }
}


//------------------------------------------------------------//
//--- Finalisation                                         ---//
//------------------------------------------------------------//

static void ms_fini(Int exit_status)
{
   // Output.
   write_snapshots_to_file();

   // Stats
   tl_assert(n_xpts > 0);   // always have alloc_xpt
   STATS("heap allocs:           %u\n", n_heap_allocs);
   STATS("heap reallocs:         %u\n", n_heap_reallocs);
   STATS("heap frees:            %u\n", n_heap_frees);
   STATS("ignored heap allocs:   %u\n", n_ignored_heap_allocs);
   STATS("ignored heap frees:    %u\n", n_ignored_heap_frees);
   STATS("ignored heap reallocs: %u\n", n_ignored_heap_reallocs);
   STATS("stack allocs:          %u\n", n_stack_allocs);
   STATS("stack frees:           %u\n", n_stack_frees);
   STATS("XPts:                  %u\n", n_xpts);
   STATS("top-XPts:              %u (%d%%)\n",
         alloc_xpt->n_children,
         ( n_xpts ? alloc_xpt->n_children * 100 / n_xpts : 0));
   STATS("XPt init expansions:   %u\n", n_xpt_init_expansions);
   STATS("XPt later expansions:  %u\n", n_xpt_later_expansions);
   STATS("SXPt allocs:           %u\n", n_sxpt_allocs);
   STATS("SXPt frees:            %u\n", n_sxpt_frees);
   STATS("skipped snapshots:     %u\n", n_skipped_snapshots);
   STATS("real snapshots:        %u\n", n_real_snapshots);
   STATS("detailed snapshots:    %u\n", n_detailed_snapshots);
   STATS("peak snapshots:        %u\n", n_peak_snapshots);
   STATS("cullings:              %u\n", n_cullings);
   STATS("XCon redos:            %u\n", n_XCon_redos);
}


//------------------------------------------------------------//
//--- Initialisation                                       ---//
//------------------------------------------------------------//

static void ms_post_clo_init(void)
{
   Int i;
   Char* LD_PRELOAD_val;
   Char* s;
   Char* s2;

   // Check options.
   if (clo_pages_as_heap) {
      if (clo_stacks) {
         VG_(fmsg_bad_option)(
            "--pages-as-heap=yes together with --stacks=yes", "");
      }
   }
   if (!clo_heap) {
      clo_pages_as_heap = False;
   }

   // If --pages-as-heap=yes we don't want malloc replacement to occur.  So we
   // disable vgpreload_massif-$PLATFORM.so by removing it from LD_PRELOAD (or
   // platform-equivalent).  We replace it entirely with spaces because then
   // the linker doesn't complain (it does complain if we just change the name
   // to a bogus file).  This is a bit of a hack, but LD_PRELOAD is setup well
   // before tool initialisation, so this seems the best way to do it.
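   //
   // For example (paths illustrative), an LD_PRELOAD value of
   //    .../vgpreload_core-amd64-linux.so:.../vgpreload_massif-amd64-linux.so
   // is rewritten in place so the massif entry becomes all spaces:
   //    .../vgpreload_core-amd64-linux.so:<spaces>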
   if (clo_pages_as_heap) {
      clo_heap_admin = 0;   // No heap admin on pages.

      LD_PRELOAD_val = VG_(getenv)( (Char*)VG_(LD_PRELOAD_var_name) );
      tl_assert(LD_PRELOAD_val);

      // Make sure the vgpreload_core-$PLATFORM entry is there, for sanity.
      s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_core");
      tl_assert(s2);

      // Now find the vgpreload_massif-$PLATFORM entry.
      s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_massif");
      tl_assert(s2);

      // Blank out everything to the previous ':', which must be there because
      // of the preceding vgpreload_core-$PLATFORM entry.
      for (s = s2; *s != ':'; s--) {
         *s = ' ';
      }

      // Blank out everything to the end of the entry, which will be '\0' if
      // LD_PRELOAD was empty before Valgrind started, or ':' otherwise.
      for (s = s2; *s != ':' && *s != '\0'; s++) {
         *s = ' ';
      }
   }

   // Print alloc-fns and ignore-fns, if necessary.
   if (VG_(clo_verbosity) > 1) {
      VERB(1, "alloc-fns:\n");
      for (i = 0; i < VG_(sizeXA)(alloc_fns); i++) {
         Char** fn_ptr = VG_(indexXA)(alloc_fns, i);
         VERB(1, "  %s\n", *fn_ptr);
      }

      VERB(1, "ignore-fns:\n");
      if (0 == VG_(sizeXA)(ignore_fns)) {
         VERB(1, "  <empty>\n");
      }
      for (i = 0; i < VG_(sizeXA)(ignore_fns); i++) {
         Char** fn_ptr = VG_(indexXA)(ignore_fns, i);
         VERB(1, "  %d: %s\n", i, *fn_ptr);
      }
   }

   // Events to track.
   if (clo_stacks) {
      VG_(track_new_mem_stack)        ( new_mem_stack        );
      VG_(track_die_mem_stack)        ( die_mem_stack        );
      VG_(track_new_mem_stack_signal) ( new_mem_stack_signal );
      VG_(track_die_mem_stack_signal) ( die_mem_stack_signal );
   }

   if (clo_pages_as_heap) {
      VG_(track_new_mem_startup) ( ms_new_mem_startup );
      VG_(track_new_mem_brk)     ( ms_new_mem_brk     );
      VG_(track_new_mem_mmap)    ( ms_new_mem_mmap    );

      VG_(track_copy_mem_remap)  ( ms_copy_mem_remap  );

      VG_(track_die_mem_brk)     ( ms_die_mem_brk     );
      VG_(track_die_mem_munmap)  ( ms_die_mem_munmap  );
   }

   // Initialise snapshot array, and sanity-check it.
   snapshots = VG_(malloc)("ms.main.mpoci.1",
                           sizeof(Snapshot) * clo_max_snapshots);
   // We don't want to do snapshot sanity checks here, because they're
   // currently uninitialised.
   for (i = 0; i < clo_max_snapshots; i++) {
      clear_snapshot( & snapshots[i], /*do_sanity_check*/False );
   }
   sanity_check_snapshots_array();
}

static void ms_pre_clo_init(void)
{
   VG_(details_name)            ("Massif");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a heap profiler");
   VG_(details_copyright_author)(
      "Copyright (C) 2003-2010, and GNU GPL'd, by Nicholas Nethercote");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   // Basic functions.
   VG_(basic_tool_funcs)          (ms_post_clo_init,
                                   ms_instrument,
                                   ms_fini);

   // Needs.
   VG_(needs_libc_freeres)();
   VG_(needs_command_line_options)(ms_process_cmd_line_option,
                                   ms_print_usage,
                                   ms_print_debug_usage);
   VG_(needs_client_requests)     (ms_handle_client_request);
   VG_(needs_sanity_checks)       (ms_cheap_sanity_check,
                                   ms_expensive_sanity_check);
   VG_(needs_malloc_replacement)  (ms_malloc,
                                   ms___builtin_new,
                                   ms___builtin_vec_new,
                                   ms_memalign,
                                   ms_calloc,
                                   ms_free,
                                   ms___builtin_delete,
                                   ms___builtin_vec_delete,
                                   ms_realloc,
                                   ms_malloc_usable_size,
                                   0 );

   // HP_Chunks.
   malloc_list = VG_(HT_construct)( "Massif's malloc list" );

   // Dummy node at top of the context structure.
   alloc_xpt = new_XPt(/*ip*/0, /*parent*/NULL);

   // Initialise alloc_fns and ignore_fns.
   init_alloc_fns();
   init_ignore_fns();

   // Initialise args_for_massif.
   args_for_massif = VG_(newXA)(VG_(malloc), "ms.main.mprci.1",
                                VG_(free), sizeof(HChar*));
}

VG_DETERMINE_INTERFACE_VERSION(ms_pre_clo_init)

//--------------------------------------------------------------------//
//--- end                                                          ---//
//--------------------------------------------------------------------//