// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>   // for write()
#endif
#include <fcntl.h>    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH  // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>  // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"    // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");
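
// Note: the defaults above can be overridden at startup through the
// HEAP_PROFILE_CLEANUP and HEAP_CHECK_MAX_LEAKS environment variables,
// which supply the EnvToBool()/EnvToInt() default values used here.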

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
#if defined(TYPE_PROFILING)
static const char kTypeProfileStatsHeader[] = "type statistics:\n";
#endif  // defined(TYPE_PROFILING)
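
// With these markers, a dump produced by FillOrderedProfile() below looks
// roughly like this (sketch, not verbatim output):
//   heap profile: <totals> @ heapprofile
//   <one line per allocation bucket (and mmap bucket, if enabled)>
//   MAPPED_LIBRARIES:
//   <contents of /proc/self/maps>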

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
// GCC requires this declaration, but MSVC does not allow it.
#if !defined(COMPILER_MSVC)
/*static*/ const int HeapProfileTable::kMaxStackDepth;
#endif

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      bucket_table_(NULL),
      profile_mmap_(profile_mmap),
      num_buckets_(0),
      address_map_(NULL) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize.
  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
                                                      const void* const key[]) {
  // Make hash-value
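  // (a simple shift/add/xor mix of the frame addresses, similar in spirit
  // to Jenkins' one-at-a-time hash)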
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash  = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next  = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
    address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
                                                  void* data) {
  const AllocationAddressIteratorArgs args(f, data);
  address_map_->Iterate<const AllocationAddressIteratorArgs&>(
      AllocationAddressesIterator, args);
}

void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
  const MarkArgs args(mark, true);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
  const MarkArgs args(mark, false);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

// We'd be happier using a higher-level formatting helper here, but we stick
// to raw snprintf() to reduce dependencies.
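// Each bucket is emitted as one profile line of the form
//   <live objs>: <live bytes> [<total allocs>: <total alloc bytes>] @<extra> <stack addrs...>
// e.g. (illustrative values only):
//      3:     1024 [     5:     2048] @ 0x08048a1c 0x08048b2d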
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
    snprintf(buf + buflen, bufsize - buflen,
             "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
             b.allocs - b.frees,
             b.alloc_size - b.free_size,
             b.allocs,
             b.alloc_size,
             extra);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  // Allocate one Bucket* slot per bucket.
  Bucket** list =
      static_cast<Bucket**>(alloc_(sizeof(Bucket*) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}

void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
                                         const char* file_name) {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name);
    return;
  }
  const DumpMarkedArgs args(fd, mark);
  address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
  RawClose(fd);
}

#if defined(TYPE_PROFILING)
void HeapProfileTable::DumpTypeStatistics(const char* file_name) const {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name);
    return;
  }

  AddressMap<TypeCount>* type_size_map;
  type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>)))
      AddressMap<TypeCount>(alloc_, dealloc_);
  address_map_->Iterate(TallyTypesItererator, type_size_map);

  RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader));
  const DumpArgs args(fd, NULL);
  type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args);
  RawClose(fd);

  type_size_map->~AddressMap<TypeCount>();
  dealloc_(type_size_map);
}
#endif  // defined(TYPE_PROFILING)

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps.  Whew!
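  //
  // Sketch of the buffer as the steps above proceed:
  //   step 1:  [ /proc/self/maps text | .......unused....... ]
  //   step 2:  [ .......unused....... | /proc/self/maps text ]
  //   step 3:  [ bucket text... | ..gap.. | /proc/self/maps text ]
  //   step 4:  [ bucket text... /proc/self/maps text ]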
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;      // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

#if defined(TYPE_PROFILING)
// static
void HeapProfileTable::TallyTypesItererator(
    const void* ptr,
    AllocValue* value,
    AddressMap<TypeCount>* type_size_map) {
  const std::type_info* type = LookupType(ptr);

  const void* key = NULL;
  if (type)
    key = type->name();

  TypeCount* count = type_size_map->FindMutable(key);
  if (count) {
    count->bytes += value->bytes;
    ++count->objects;
  } else {
    type_size_map->Insert(key, TypeCount(value->bytes, 1));
  }
}

// static
void HeapProfileTable::DumpTypesIterator(const void* ptr,
                                         TypeCount* count,
                                         const DumpArgs& args) {
  char buf[1024];
  int len;
  const char* mangled_type_name = static_cast<const char*>(ptr);
  len = snprintf(buf, sizeof(buf), "%6d: %8" PRId64 " @ %s\n",
                 count->objects, count->bytes,
                 mangled_type_name ? mangled_type_name : "(no_typeinfo)");
  RawWrite(args.fd, buf, len);
}
#endif  // defined(TYPE_PROFILING)

inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

inline
void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v,
                                          const DumpMarkedArgs& args) {
  if (v->mark() != args.mark)
    return;
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char addr[32];   // room for "0x" + 16 hex digits + NUL on 64-bit
  snprintf(addr, sizeof(addr), "0x%08" PRIxPTR,
           reinterpret_cast<uintptr_t>(ptr));
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL);
  RawWrite(args.fd, buf, len);
}

inline
void HeapProfileTable::AllocationAddressesIterator(
    const void* ptr,
    AllocValue* v,
    const AllocationAddressIteratorArgs& args) {
  args.callback(args.data, ptr);
}

inline
void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
                                    const MarkArgs& args) {
  if (!args.mark_all && v->mark() != UNMARKED)
    return;
  v->set_mark(args.mark);
}

// Callback from NonLiveSnapshot; adds an entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
  char buf[512];
  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                          NULL);
  RawWrite(fd, buf, len);
  const DumpArgs args(fd, NULL);
  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
  DumpProcSelfMaps(fd);
  RawClose(fd);
  return true;
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  char buf[1000];
  snprintf(buf, 1000, "%s.%05d.", prefix, getpid());
  string pattern = string(buf) + ".*" + kFileExt;
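  // i.e. the glob pattern below is "<prefix>.<pid>..*.heap".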

#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()]; // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
          reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}