1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // ---
6 // Author: Sainbayar Sukhbaatar
7 // Dai Mikurube
8 //
9
10 #include "deep-heap-profile.h"
11
12 #ifdef USE_DEEP_HEAP_PROFILE
13 #include <algorithm>
14 #include <fcntl.h>
15 #include <sys/stat.h>
16 #include <sys/types.h>
17 #include <time.h>
18 #ifdef HAVE_UNISTD_H
19 #include <unistd.h> // for getpagesize and getpid
20 #endif // HAVE_UNISTD_H
21
22 #if defined(__linux__)
23 #include <endian.h>
24 #if !defined(__LITTLE_ENDIAN__) and !defined(__BIG_ENDIAN__)
25 #if __BYTE_ORDER == __BIG_ENDIAN
26 #define __BIG_ENDIAN__
27 #endif // __BYTE_ORDER == __BIG_ENDIAN
28 #endif // !defined(__LITTLE_ENDIAN__) and !defined(__BIG_ENDIAN__)
29 #if defined(__BIG_ENDIAN__)
30 #include <byteswap.h>
31 #endif // defined(__BIG_ENDIAN__)
32 #endif // defined(__linux__)
33 #if defined(COMPILER_MSVC)
34 #include <Winsock2.h> // for gethostname
35 #endif // defined(COMPILER_MSVC)
36
37 #include "base/cycleclock.h"
38 #include "base/sysinfo.h"
39 #include "internal_logging.h" // for ASSERT, etc
40
41 static const int kProfilerBufferSize = 1 << 20;
42 static const int kHashTableSize = 179999; // Same as heap-profile-table.cc.
43
44 static const int PAGEMAP_BYTES = 8;
45 static const int KPAGECOUNT_BYTES = 8;
46 static const uint64 MAX_ADDRESS = kuint64max;
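// PAGEMAP_BYTES and KPAGECOUNT_BYTES are the per-page entry sizes of
// /proc/<pid>/pagemap and /proc/kpagecount respectively; MAX_ADDRESS bounds
// the page walk in CommittedSize() so the address arithmetic cannot overflow.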
47
48 // Tag strings in heap profile dumps.
49 static const char kProfileHeader[] = "heap profile: ";
50 static const char kProfileVersion[] = "DUMP_DEEP_6";
51 static const char kMetaInformationHeader[] = "META:\n";
52 static const char kMMapListHeader[] = "MMAP_LIST:\n";
53 static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
54 static const char kStacktraceHeader[] = "STACKTRACES:\n";
55 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
56
57 static const char kVirtualLabel[] = "virtual";
58 static const char kCommittedLabel[] = "committed";
59
60 #if defined(__linux__)
61 #define OS_NAME "linux"
62 #elif defined(_WIN32) || defined(_WIN64)
63 #define OS_NAME "windows"
64 #else
65 #define OS_NAME "unknown-os"
66 #endif
67
68 bool DeepHeapProfile::AppendCommandLine(TextBuffer* buffer) {
69 #if defined(__linux__)
70 RawFD fd;
71 char filename[100];
72 char cmdline[4096];
73 snprintf(filename, sizeof(filename), "/proc/%d/cmdline",
74 static_cast<int>(getpid()));
75 fd = open(filename, O_RDONLY);
76 if (fd == kIllegalRawFD) {
77 RAW_LOG(0, "Failed to open /proc/self/cmdline");
78 return false;
79 }
80
81 ssize_t length = read(fd, cmdline, sizeof(cmdline) - 1);
82 close(fd);
83 if (length < 0)
84 length = 0;  // Treat a failed read as an empty command line.
85 for (ssize_t i = 0; i < length; ++i)
86 if (cmdline[i] == '\0') cmdline[i] = ' ';
87 cmdline[length] = '\0';
88
89 buffer->AppendString("CommandLine: ", 0);
90 buffer->AppendString(cmdline, 0);
91 buffer->AppendChar('\n');
92
93 return true;
94 #else
95 return false;
96 #endif
97 }
98
99 #if defined(_WIN32) || defined(_WIN64)
100
101 // TODO(peria): Implement this function.
102 void DeepHeapProfile::MemoryInfoGetterWindows::Initialize() {
103 }
104
105 // TODO(peria): Implement this function.
106 size_t DeepHeapProfile::MemoryInfoGetterWindows::CommittedSize(
107 uint64 first_address,
108 uint64 last_address,
109 TextBuffer* buffer) const {
110 return 0;
111 }
112
113 // TODO(peria): Implement this function.
114 bool DeepHeapProfile::MemoryInfoGetterWindows::IsPageCountAvailable() const {
115 return false;
116 }
117
118 #endif // defined(_WIN32) || defined(_WIN64)
119
120 #if defined(__linux__)
121
122 void DeepHeapProfile::MemoryInfoGetterLinux::Initialize() {
123 char filename[100];
124 snprintf(filename, sizeof(filename), "/proc/%d/pagemap",
125 static_cast<int>(getpid()));
126 pagemap_fd_ = open(filename, O_RDONLY);
127 RAW_CHECK(pagemap_fd_ != -1, "Failed to open /proc/self/pagemap");
128
129 if (pageframe_type_ == DUMP_PAGECOUNT) {
130 // /proc/kpagecount is system-wide; there is no pid in its path.
131 snprintf(filename, sizeof(filename), "/proc/kpagecount");
132 kpagecount_fd_ = open(filename, O_RDONLY);
133 if (kpagecount_fd_ == -1)
134 RAW_LOG(0, "Failed to open /proc/kpagecount");
135 }
136 }
137
138 size_t DeepHeapProfile::MemoryInfoGetterLinux::CommittedSize(
139 uint64 first_address,
140 uint64 last_address,
141 DeepHeapProfile::TextBuffer* buffer) const {
142 int page_size = getpagesize();
143 uint64 page_address = (first_address / page_size) * page_size;
144 size_t committed_size = 0;
145 size_t pageframe_list_length = 0;
146
147 Seek(first_address);
148
149 // Check every page on which the allocation resides.
150 while (page_address <= last_address) {
151 // Read corresponding physical page.
152 State state;
153 // TODO(dmikurube): Read pagemap in bulk for speed.
154 // TODO(dmikurube): Consider using mincore(2).
155 if (Read(&state, pageframe_type_ != DUMP_NO_PAGEFRAME) == false) {
156 // We can't read the last region (e.g. vsyscall).
157 #ifndef NDEBUG
158 RAW_LOG(0, "pagemap read failed @ %#llx %" PRId64 " bytes",
159 first_address, last_address - first_address + 1);
160 #endif
161 return 0;
162 }
163
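// Each resident page is appended to a " PF:" list as 4 base64 digits (the
// low 24 bits of its PFN). With DUMP_PAGECOUNT, a '#' and one base64 digit
// follow, carrying the mapcount clamped to 63. '<' and '>' mark pages that
// are only partially covered by this allocation.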
164 // Dump pageframes of resident pages. Non-resident pages are just skipped.
165 if (pageframe_type_ != DUMP_NO_PAGEFRAME &&
166 buffer != NULL && state.pfn != 0) {
167 if (pageframe_list_length == 0) {
168 buffer->AppendString(" PF:", 0);
169 pageframe_list_length = 5;
170 }
171 buffer->AppendChar(' ');
172 if (page_address < first_address)
173 buffer->AppendChar('<');
174 buffer->AppendBase64(state.pfn, 4);
175 pageframe_list_length += 5;
176 if (pageframe_type_ == DUMP_PAGECOUNT && IsPageCountAvailable()) {
177 uint64 pagecount = ReadPageCount(state.pfn);
178 // Assume pagecount == 63 if the pageframe is mapped more than 63 times.
179 if (pagecount > 63)
180 pagecount = 63;
181 buffer->AppendChar('#');
182 buffer->AppendBase64(pagecount, 1);
183 pageframe_list_length += 2;
184 }
185 if (last_address < page_address - 1 + page_size)
186 buffer->AppendChar('>');
187 // Begins a new line every 94 characters.
188 if (pageframe_list_length > 94) {
189 buffer->AppendChar('\n');
190 pageframe_list_length = 0;
191 }
192 }
193
194 if (state.is_committed) {
195 // Calculate the size of the allocation part in this page.
196 size_t bytes = page_size;
197
198 // If looking at the last page in a given region.
199 if (last_address <= page_address - 1 + page_size) {
200 bytes = last_address - page_address + 1;
201 }
202
203 // If looking at the first page in a given region.
204 if (page_address < first_address) {
205 bytes -= first_address - page_address;
206 }
207
208 committed_size += bytes;
209 }
210 if (page_address > MAX_ADDRESS - page_size) {
211 break;
212 }
213 page_address += page_size;
214 }
215
216 if (pageframe_type_ != DUMP_NO_PAGEFRAME &&
217 buffer != NULL && pageframe_list_length != 0) {
218 buffer->AppendChar('\n');
219 }
220
221 return committed_size;
222 }
223
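// /proc/kpagecount holds one 64-bit mapcount per physical page frame,
// indexed by PFN, so the file offset for a frame is pfn * KPAGECOUNT_BYTES.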
224 uint64 DeepHeapProfile::MemoryInfoGetterLinux::ReadPageCount(uint64 pfn) const {
225 int64 index = pfn * KPAGECOUNT_BYTES;
226 int64 offset = lseek64(kpagecount_fd_, index, SEEK_SET);
227 RAW_DCHECK(offset == index, "Failed in seeking in kpagecount.");
228
229 uint64 kpagecount_value;
230 int result = read(kpagecount_fd_, &kpagecount_value, KPAGECOUNT_BYTES);
231 if (result != KPAGECOUNT_BYTES)
232 return 0;
233
234 return kpagecount_value;
235 }
236
237 bool DeepHeapProfile::MemoryInfoGetterLinux::Seek(uint64 address) const {
238 int64 index = (address / getpagesize()) * PAGEMAP_BYTES;
239 RAW_DCHECK(pagemap_fd_ != -1, "Failed to seek in /proc/self/pagemap");
240 int64 offset = lseek64(pagemap_fd_, index, SEEK_SET);
241 RAW_DCHECK(offset == index, "Failed in seeking.");
242 return offset >= 0;
243 }
244
245 bool DeepHeapProfile::MemoryInfoGetterLinux::Read(
246 State* state, bool get_pfn) const {
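// Layout of a /proc/<pid>/pagemap entry (one little-endian 64-bit word per
// page): bits 0-54 hold the PFN for present pages (or swap information for
// swapped pages), bit 62 is "swapped" and bit 63 is "present". The FLAG_*
// constants below appear to correspond to /proc/kpageflags bits and are not
// used here.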
247 static const uint64 U64_1 = 1;
248 static const uint64 PFN_FILTER = (U64_1 << 55) - U64_1;
249 static const uint64 PAGE_PRESENT = U64_1 << 63;
250 static const uint64 PAGE_SWAP = U64_1 << 62;
251 static const uint64 PAGE_RESERVED = U64_1 << 61;
252 static const uint64 FLAG_NOPAGE = U64_1 << 20;
253 static const uint64 FLAG_KSM = U64_1 << 21;
254 static const uint64 FLAG_MMAP = U64_1 << 11;
255
256 uint64 pagemap_value;
257 RAW_DCHECK(pagemap_fd_ != -1, "Failed to read from /proc/self/pagemap");
258 int result = read(pagemap_fd_, &pagemap_value, PAGEMAP_BYTES);
259 if (result != PAGEMAP_BYTES) {
260 return false;
261 }
262
263 // Check if the page is committed.
264 state->is_committed = (pagemap_value & (PAGE_PRESENT | PAGE_SWAP));
265
266 state->is_present = (pagemap_value & PAGE_PRESENT);
267 state->is_swapped = (pagemap_value & PAGE_SWAP);
268 state->is_shared = false;
269
270 if (get_pfn && state->is_present && !state->is_swapped)
271 state->pfn = (pagemap_value & PFN_FILTER);
272 else
273 state->pfn = 0;
274
275 return true;
276 }
277
278 bool DeepHeapProfile::MemoryInfoGetterLinux::IsPageCountAvailable() const {
279 return kpagecount_fd_ != -1;
280 }
281
282 #endif // defined(__linux__)
283
284 DeepHeapProfile::MemoryResidenceInfoGetterInterface::
285 MemoryResidenceInfoGetterInterface() {}
286
287 DeepHeapProfile::MemoryResidenceInfoGetterInterface::
288 ~MemoryResidenceInfoGetterInterface() {}
289
290 DeepHeapProfile::MemoryResidenceInfoGetterInterface*
291 DeepHeapProfile::MemoryResidenceInfoGetterInterface::Create(
292 PageFrameType pageframe_type) {
293 #if defined(_WIN32) || defined(_WIN64)
294 return new MemoryInfoGetterWindows(pageframe_type);
295 #elif defined(__linux__)
296 return new MemoryInfoGetterLinux(pageframe_type);
297 #else
298 return NULL;
299 #endif
300 }
301
302 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
303 const char* prefix,
304 enum PageFrameType pageframe_type)
305 : memory_residence_info_getter_(
306 MemoryResidenceInfoGetterInterface::Create(pageframe_type)),
307 most_recent_pid_(-1),
308 stats_(),
309 dump_count_(0),
310 filename_prefix_(NULL),
311 deep_table_(kHashTableSize, heap_profile->alloc_, heap_profile->dealloc_),
312 pageframe_type_(pageframe_type),
313 heap_profile_(heap_profile) {
314 // Copy filename prefix.
315 const int prefix_length = strlen(prefix);
316 filename_prefix_ =
317 reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1));
318 memcpy(filename_prefix_, prefix, prefix_length);
319 filename_prefix_[prefix_length] = '\0';
320
321 strncpy(run_id_, "undetermined-run-id", sizeof(run_id_));
322 }
323
324 DeepHeapProfile::~DeepHeapProfile() {
325 heap_profile_->dealloc_(filename_prefix_);
326 delete memory_residence_info_getter_;
327 }
328
329 // Global malloc() should not be used in this function.
330 // Use LowLevelAlloc if required.
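// The ordered dump is written in the following order: the version header,
// a META: block (time, command line, run ID, page size), the MMAP_LIST: and
// GLOBAL_STATS: sections, and finally the STACKTRACES: table of buckets.
// Full bucket details go to a separate ".buckets" file.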
331 void DeepHeapProfile::DumpOrderedProfile(const char* reason,
332 char raw_buffer[],
333 int buffer_size,
334 RawFD fd) {
335 TextBuffer buffer(raw_buffer, buffer_size, fd);
336
337 #ifndef NDEBUG
338 int64 starting_cycles = CycleClock::Now();
339 #endif
340
341 // Get the time before starting snapshot.
342 // TODO(dmikurube): Consider gettimeofday if available.
343 time_t time_value = time(NULL);
344
345 ++dump_count_;
346
347 // Re-open files in /proc/pid/ if the process is a newly forked one.
348 if (most_recent_pid_ != getpid()) {
349 char hostname[64];
350 if (0 == gethostname(hostname, sizeof(hostname))) {
351 char* dot = strchr(hostname, '.');
352 if (dot != NULL)
353 *dot = '\0';
354 } else {
355 strcpy(hostname, "unknown");
356 }
357
358 most_recent_pid_ = getpid();
359
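// Compose a run ID of the form "<host>-<os>-<pid>-<start time>". It stays
// the same for every dump taken from this process.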
360 snprintf(run_id_, sizeof(run_id_), "%s-" OS_NAME "-%d-%lu",
361 hostname, most_recent_pid_, time(NULL));
362
363 if (memory_residence_info_getter_)
364 memory_residence_info_getter_->Initialize();
365 deep_table_.ResetIsLogged();
366
367 // Write maps into "|filename_prefix_|.<pid>.maps".
368 WriteProcMaps(filename_prefix_, raw_buffer, buffer_size);
369 }
370
371 // Reset committed sizes of buckets.
372 deep_table_.ResetCommittedSize();
373
374 // Record committed sizes.
375 stats_.SnapshotAllocations(this);
376
377 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
378 // glibc's snprintf normally allocates scratch memory with alloca, but it
379 // falls back to malloc when a large buffer is required.
380
381 buffer.AppendString(kProfileHeader, 0);
382 buffer.AppendString(kProfileVersion, 0);
383 buffer.AppendString("\n", 0);
384
385 // Fill buffer with meta information.
386 buffer.AppendString(kMetaInformationHeader, 0);
387
388 buffer.AppendString("Time: ", 0);
389 buffer.AppendUnsignedLong(time_value, 0);
390 buffer.AppendChar('\n');
391
392 if (reason != NULL) {
393 buffer.AppendString("Reason: ", 0);
394 buffer.AppendString(reason, 0);
395 buffer.AppendChar('\n');
396 }
397
398 AppendCommandLine(&buffer);
399
400 buffer.AppendString("RunID: ", 0);
401 buffer.AppendString(run_id_, 0);
402 buffer.AppendChar('\n');
403
404 buffer.AppendString("PageSize: ", 0);
405 buffer.AppendInt(getpagesize(), 0, 0);
406 buffer.AppendChar('\n');
407
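// The "PageFrame:" meta line tells analyzers how page frames are encoded in
// this dump: 24-bit PFNs as base64, optionally followed by mapcounts.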
408 // Assumes that physical memory is <= 64GB (PFN < 2^24).
409 if (pageframe_type_ == DUMP_PAGECOUNT && memory_residence_info_getter_ &&
410 memory_residence_info_getter_->IsPageCountAvailable()) {
411 buffer.AppendString("PageFrame: 24,Base64,PageCount", 0);
412 buffer.AppendChar('\n');
413 } else if (pageframe_type_ != DUMP_NO_PAGEFRAME) {
414 buffer.AppendString("PageFrame: 24,Base64", 0);
415 buffer.AppendChar('\n');
416 }
417
418 // Fill buffer with the list of mapped regions (MMAP_LIST).
419 buffer.AppendString(kMMapListHeader, 0);
420
421 stats_.SnapshotMaps(memory_residence_info_getter_, this, &buffer);
422
423 // Fill buffer with the global stats.
424 buffer.AppendString(kGlobalStatsHeader, 0);
425
426 stats_.Unparse(&buffer);
427
428 buffer.AppendString(kStacktraceHeader, 0);
429 buffer.AppendString(kVirtualLabel, 10);
430 buffer.AppendChar(' ');
431 buffer.AppendString(kCommittedLabel, 10);
432 buffer.AppendString("\n", 0);
433
434 // Fill buffer.
435 deep_table_.UnparseForStats(&buffer);
436
437 buffer.Flush();
438
439 // Write the bucket listing into a .bucket file.
440 deep_table_.WriteForBucketFile(
441 filename_prefix_, dump_count_, raw_buffer, buffer_size);
442
443 #ifndef NDEBUG
444 int64 elapsed_cycles = CycleClock::Now() - starting_cycles;
445 double elapsed_seconds = elapsed_cycles / CyclesPerSecond();
446 RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds);
447 #endif
448 }
449
450 int DeepHeapProfile::TextBuffer::Size() {
451 return size_;
452 }
453
454 int DeepHeapProfile::TextBuffer::FilledBytes() {
455 return cursor_;
456 }
457
458 void DeepHeapProfile::TextBuffer::Clear() {
459 cursor_ = 0;
460 }
461
462 void DeepHeapProfile::TextBuffer::Flush() {
463 RawWrite(fd_, buffer_, cursor_);
464 cursor_ = 0;
465 }
466
467 // TODO(dmikurube): These Append* functions should not use snprintf.
468 bool DeepHeapProfile::TextBuffer::AppendChar(char value) {
469 return ForwardCursor(snprintf(buffer_ + cursor_, size_ - cursor_,
470 "%c", value));
471 }
472
473 bool DeepHeapProfile::TextBuffer::AppendString(const char* value, int width) {
474 char* position = buffer_ + cursor_;
475 int available = size_ - cursor_;
476 int appended;
477 if (width == 0)
478 appended = snprintf(position, available, "%s", value);
479 else
480 appended = snprintf(position, available, "%*s",
481 width, value);
482 return ForwardCursor(appended);
483 }
484
485 bool DeepHeapProfile::TextBuffer::AppendInt(int value, int width,
486 bool leading_zero) {
487 char* position = buffer_ + cursor_;
488 int available = size_ - cursor_;
489 int appended;
490 if (width == 0)
491 appended = snprintf(position, available, "%d", value);
492 else if (leading_zero)
493 appended = snprintf(position, available, "%0*d", width, value);
494 else
495 appended = snprintf(position, available, "%*d", width, value);
496 return ForwardCursor(appended);
497 }
498
499 bool DeepHeapProfile::TextBuffer::AppendLong(long value, int width) {
500 char* position = buffer_ + cursor_;
501 int available = size_ - cursor_;
502 int appended;
503 if (width == 0)
504 appended = snprintf(position, available, "%ld", value);
505 else
506 appended = snprintf(position, available, "%*ld", width, value);
507 return ForwardCursor(appended);
508 }
509
510 bool DeepHeapProfile::TextBuffer::AppendUnsignedLong(unsigned long value,
511 int width) {
512 char* position = buffer_ + cursor_;
513 int available = size_ - cursor_;
514 int appended;
515 if (width == 0)
516 appended = snprintf(position, available, "%lu", value);
517 else
518 appended = snprintf(position, available, "%*lu", width, value);
519 return ForwardCursor(appended);
520 }
521
522 bool DeepHeapProfile::TextBuffer::AppendInt64(int64 value, int width) {
523 char* position = buffer_ + cursor_;
524 int available = size_ - cursor_;
525 int appended;
526 if (width == 0)
527 appended = snprintf(position, available, "%" PRId64, value);
528 else
529 appended = snprintf(position, available, "%*" PRId64, width, value);
530 return ForwardCursor(appended);
531 }
532
533 bool DeepHeapProfile::TextBuffer::AppendPtr(uint64 value, int width) {
534 char* position = buffer_ + cursor_;
535 int available = size_ - cursor_;
536 int appended;
537 if (width == 0)
538 appended = snprintf(position, available, "%" PRIx64, value);
539 else
540 appended = snprintf(position, available, "%0*" PRIx64, width, value);
541 return ForwardCursor(appended);
542 }
543
544 bool DeepHeapProfile::TextBuffer::AppendBase64(uint64 value, int width) {
545 static const char base64[65] =
546 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
547 #if defined(__BIG_ENDIAN__)
548 value = bswap_64(value);
549 #endif
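// Emit |width| base64 digits, most-significant 6-bit group first.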
550 for (int shift = (width - 1) * 6; shift >= 0; shift -= 6) {
551 if (!AppendChar(base64[(value >> shift) & 0x3f]))
552 return false;
553 }
554 return true;
555 }
556
557 bool DeepHeapProfile::TextBuffer::ForwardCursor(int appended) {
558 if (appended < 0 || appended >= size_ - cursor_)
559 return false;
560 cursor_ += appended;
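// Flush once the buffer is more than 80% full so the next Append* call is
// unlikely to run out of space.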
561 if (cursor_ > size_ * 4 / 5)
562 Flush();
563 return true;
564 }
565
566 void DeepHeapProfile::DeepBucket::UnparseForStats(TextBuffer* buffer) {
567 buffer->AppendInt64(bucket->alloc_size - bucket->free_size, 10);
568 buffer->AppendChar(' ');
569 buffer->AppendInt64(committed_size, 10);
570 buffer->AppendChar(' ');
571 buffer->AppendInt(bucket->allocs, 6, false);
572 buffer->AppendChar(' ');
573 buffer->AppendInt(bucket->frees, 6, false);
574 buffer->AppendString(" @ ", 0);
575 buffer->AppendInt(id, 0, false);
576 buffer->AppendString("\n", 0);
577 }
578
579 void DeepHeapProfile::DeepBucket::UnparseForBucketFile(TextBuffer* buffer) {
580 buffer->AppendInt(id, 0, false);
581 buffer->AppendChar(' ');
582 buffer->AppendString(is_mmap ? "mmap" : "malloc", 0);
583
584 #if defined(TYPE_PROFILING)
585 buffer->AppendString(" t0x", 0);
586 buffer->AppendPtr(reinterpret_cast<uintptr_t>(type), 0);
587 if (type == NULL) {
588 buffer->AppendString(" nno_typeinfo", 0);
589 } else {
590 buffer->AppendString(" n", 0);
591 buffer->AppendString(type->name(), 0);
592 }
593 #endif
594
595 for (int depth = 0; depth < bucket->depth; depth++) {
596 buffer->AppendString(" 0x", 0);
597 buffer->AppendPtr(reinterpret_cast<uintptr_t>(bucket->stack[depth]), 8);
598 }
599 buffer->AppendString("\n", 0);
600 }
601
602 DeepHeapProfile::DeepBucketTable::DeepBucketTable(
603 int table_size,
604 HeapProfileTable::Allocator alloc,
605 HeapProfileTable::DeAllocator dealloc)
606 : table_(NULL),
607 table_size_(table_size),
608 alloc_(alloc),
609 dealloc_(dealloc),
610 bucket_id_(0) {
611 const int bytes = table_size * sizeof(DeepBucket*);
612 table_ = reinterpret_cast<DeepBucket**>(alloc(bytes));
613 memset(table_, 0, bytes);
614 }
615
616 DeepHeapProfile::DeepBucketTable::~DeepBucketTable() {
617 ASSERT(table_ != NULL);
618 for (int i = 0; i < table_size_; i++) {
619 for (DeepBucket* x = table_[i]; x != 0; /**/) {
620 DeepBucket* db = x;
621 x = x->next;
622 dealloc_(db);
623 }
624 }
625 dealloc_(table_);
626 }
627
628 DeepHeapProfile::DeepBucket* DeepHeapProfile::DeepBucketTable::Lookup(
629 Bucket* bucket,
630 #if defined(TYPE_PROFILING)
631 const std::type_info* type,
632 #endif
633 bool is_mmap) {
634 // Compute the hash value from the bucket, the mmap flag and (if enabled) the type.
635 uintptr_t h = 0;
636
637 AddToHashValue(reinterpret_cast<uintptr_t>(bucket), &h);
638 if (is_mmap) {
639 AddToHashValue(1, &h);
640 } else {
641 AddToHashValue(0, &h);
642 }
643
644 #if defined(TYPE_PROFILING)
645 if (type == NULL) {
646 AddToHashValue(0, &h);
647 } else {
648 AddToHashValue(reinterpret_cast<uintptr_t>(type->name()), &h);
649 }
650 #endif
651
652 FinishHashValue(&h);
653
654 // Lookup stack trace in table
655 unsigned int buck = ((unsigned int) h) % table_size_;
656 for (DeepBucket* db = table_[buck]; db != 0; db = db->next) {
657 if (db->bucket == bucket) {
658 return db;
659 }
660 }
661
662 // Create a new bucket
663 DeepBucket* db = reinterpret_cast<DeepBucket*>(alloc_(sizeof(DeepBucket)));
664 memset(db, 0, sizeof(*db));
665 db->bucket = bucket;
666 #if defined(TYPE_PROFILING)
667 db->type = type;
668 #endif
669 db->committed_size = 0;
670 db->is_mmap = is_mmap;
671 db->id = (bucket_id_++);
672 db->is_logged = false;
673 db->next = table_[buck];
674 table_[buck] = db;
675 return db;
676 }
677
678 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
679 void DeepHeapProfile::DeepBucketTable::UnparseForStats(TextBuffer* buffer) {
680 for (int i = 0; i < table_size_; i++) {
681 for (DeepBucket* deep_bucket = table_[i];
682 deep_bucket != NULL;
683 deep_bucket = deep_bucket->next) {
684 Bucket* bucket = deep_bucket->bucket;
685 if (bucket->alloc_size - bucket->free_size == 0) {
686 continue; // Skip empty buckets.
687 }
688 deep_bucket->UnparseForStats(buffer);
689 }
690 }
691 }
692
693 void DeepHeapProfile::DeepBucketTable::WriteForBucketFile(
694 const char* prefix, int dump_count, char raw_buffer[], int buffer_size) {
695 char filename[100];
696 snprintf(filename, sizeof(filename),
697 "%s.%05d.%04d.buckets", prefix, getpid(), dump_count);
698 RawFD fd = RawOpenForWriting(filename);
699 RAW_DCHECK(fd != kIllegalRawFD, "");
700
701 TextBuffer buffer(raw_buffer, buffer_size, fd);
702
703 for (int i = 0; i < table_size_; i++) {
704 for (DeepBucket* deep_bucket = table_[i];
705 deep_bucket != NULL;
706 deep_bucket = deep_bucket->next) {
707 Bucket* bucket = deep_bucket->bucket;
708 if (deep_bucket->is_logged) {
709 continue; // Skip the bucket if it is already logged.
710 }
711 if (!deep_bucket->is_mmap &&
712 bucket->alloc_size - bucket->free_size <= 64) {
713 continue; // Skip small malloc buckets.
714 }
715
716 deep_bucket->UnparseForBucketFile(&buffer);
717 deep_bucket->is_logged = true;
718 }
719 }
720
721 buffer.Flush();
722 RawClose(fd);
723 }
724
725 void DeepHeapProfile::DeepBucketTable::ResetCommittedSize() {
726 for (int i = 0; i < table_size_; i++) {
727 for (DeepBucket* deep_bucket = table_[i];
728 deep_bucket != NULL;
729 deep_bucket = deep_bucket->next) {
730 deep_bucket->committed_size = 0;
731 }
732 }
733 }
734
735 void DeepHeapProfile::DeepBucketTable::ResetIsLogged() {
736 for (int i = 0; i < table_size_; i++) {
737 for (DeepBucket* deep_bucket = table_[i];
738 deep_bucket != NULL;
739 deep_bucket = deep_bucket->next) {
740 deep_bucket->is_logged = false;
741 }
742 }
743 }
744
745 // This hash function is from HeapProfileTable::GetBucket.
746 // static
747 void DeepHeapProfile::DeepBucketTable::AddToHashValue(
748 uintptr_t add, uintptr_t* hash_value) {
749 *hash_value += add;
750 *hash_value += *hash_value << 10;
751 *hash_value ^= *hash_value >> 6;
752 }
753
754 // This hash function is from HeapProfileTable::GetBucket.
755 // static
756 void DeepHeapProfile::DeepBucketTable::FinishHashValue(uintptr_t* hash_value) {
757 *hash_value += *hash_value << 3;
758 *hash_value ^= *hash_value >> 11;
759 }
760
761 void DeepHeapProfile::RegionStats::Initialize() {
762 virtual_bytes_ = 0;
763 committed_bytes_ = 0;
764 }
765
766 uint64 DeepHeapProfile::RegionStats::Record(
767 const MemoryResidenceInfoGetterInterface* memory_residence_info_getter,
768 uint64 first_address,
769 uint64 last_address,
770 TextBuffer* buffer) {
771 uint64 committed = 0;
772 virtual_bytes_ += static_cast<size_t>(last_address - first_address + 1);
773 if (memory_residence_info_getter)
774 committed = memory_residence_info_getter->CommittedSize(first_address,
775 last_address,
776 buffer);
777 committed_bytes_ += committed;
778 return committed;
779 }
780
781 void DeepHeapProfile::RegionStats::Unparse(const char* name,
782 TextBuffer* buffer) {
783 buffer->AppendString(name, 25);
784 buffer->AppendChar(' ');
785 buffer->AppendLong(virtual_bytes_, 12);
786 buffer->AppendChar(' ');
787 buffer->AppendLong(committed_bytes_, 12);
788 buffer->AppendString("\n", 0);
789 }
790
791 // Snapshots all virtual memory mapping stats by merging mmap(2) records from
792 // MemoryRegionMap and /proc/maps, the OS-level memory mapping information.
793 // Memory regions described in /proc/maps, but which are not created by mmap,
794 // are accounted as "unhooked" memory regions.
795 //
796 // This function assumes that every memory region created by mmap is covered
797 // by VMA(s) described in /proc/maps except for http://crbug.com/189114.
798 // Note that memory regions created with mmap don't align with the borders of
799 // VMAs in /proc/maps. In other words, one mmap'ed region can cut across many
800 // VMAs, and one VMA can of course contain many mmap'ed regions.
801 // This means that situations like the following can happen:
802 //
803 // => Virtual address
804 // <----- VMA #1 -----><----- VMA #2 ----->...<----- VMA #3 -----><- VMA #4 ->
805 // ..< mmap #1 >.<- mmap #2 -><- mmap #3 ->...<- mmap #4 ->..<-- mmap #5 -->..
806 //
807 // It can happen easily as permission can be changed by mprotect(2) for a part
808 // of a memory region. A change in permission splits VMA(s).
809 //
810 // To deal with the situation, this function iterates over MemoryRegionMap and
811 // /proc/maps independently. The iterator for MemoryRegionMap is initialized
812 // at the top outside the loop for /proc/maps, and it goes forward inside the
813 // loop while comparing their addresses.
814 //
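// A simplified sketch of the merge loop below (the real code additionally
// dumps page frames and handles regions that straddle VMA boundaries):
//
//   mmap_iter = MemoryRegionMap::BeginRegionLocked();
//   for each VMA in /proc/<pid>/maps:
//     account [cursor, next mmap start) as "unhooked";
//     account the overlap of *mmap_iter with the VMA as "profiled-mmap";
//     advance mmap_iter while it still ends inside this VMA.
//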
815 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
816 void DeepHeapProfile::GlobalStats::SnapshotMaps(
817 const MemoryResidenceInfoGetterInterface* memory_residence_info_getter,
818 DeepHeapProfile* deep_profile,
819 TextBuffer* mmap_dump_buffer) {
820 MemoryRegionMap::LockHolder lock_holder;
821 ProcMapsIterator::Buffer procmaps_iter_buffer;
822 ProcMapsIterator procmaps_iter(0, &procmaps_iter_buffer);
823 uint64 vma_start_addr, vma_last_addr, offset;
824 int64 inode;
825 char* flags;
826 char* filename;
827 enum MapsRegionType type;
828
829 for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
830 all_[i].Initialize();
831 unhooked_[i].Initialize();
832 }
833 profiled_mmap_.Initialize();
834
835 MemoryRegionMap::RegionIterator mmap_iter =
836 MemoryRegionMap::BeginRegionLocked();
837 DeepBucket* deep_bucket = NULL;
838 if (mmap_iter != MemoryRegionMap::EndRegionLocked()) {
839 deep_bucket = GetInformationOfMemoryRegion(
840 mmap_iter, memory_residence_info_getter, deep_profile);
841 }
842
843 while (procmaps_iter.Next(&vma_start_addr, &vma_last_addr,
844 &flags, &offset, &inode, &filename)) {
845 if (mmap_dump_buffer) {
846 char buffer[1024];
847 int written = procmaps_iter.FormatLine(buffer, sizeof(buffer),
848 vma_start_addr, vma_last_addr,
849 flags, offset, inode, filename, 0);
850 mmap_dump_buffer->AppendString(buffer, 0);
851 }
852
853 // 'vma_last_addr' should be the last inclusive address of the region.
854 vma_last_addr -= 1;
855 if (strcmp("[vsyscall]", filename) == 0) {
856 continue; // Reading pagemap will fail in [vsyscall].
857 }
858
859 // TODO(dmikurube): |type| will be deprecated in the dump.
860 // See http://crbug.com/245603.
861 type = ABSENT;
862 if (filename[0] == '/') {
863 if (flags[2] == 'x')
864 type = FILE_EXEC;
865 else
866 type = FILE_NONEXEC;
867 } else if (filename[0] == '\0' || filename[0] == '\n') {
868 type = ANONYMOUS;
869 } else if (strcmp(filename, "[stack]") == 0) {
870 type = STACK;
871 } else {
872 type = OTHER;
873 }
874 // TODO(dmikurube): This |all_| count should be removed in the near future.
875 // See http://crbug.com/245603.
876 uint64 vma_total = all_[type].Record(
877 memory_residence_info_getter, vma_start_addr, vma_last_addr, NULL);
878 uint64 vma_subtotal = 0;
879
880 // TODO(dmikurube): Stop double-counting pagemap.
881 // It will be fixed when http://crbug.com/245603 finishes.
882 if (MemoryRegionMap::IsRecordingLocked()) {
883 uint64 cursor = vma_start_addr;
884 bool first = true;
885
886 // Iterates over MemoryRegionMap until the iterator moves out of the VMA.
887 do {
888 if (!first) {
889 cursor = mmap_iter->end_addr;
890 ++mmap_iter;
891 // Don't break here even if mmap_iter == EndRegionLocked().
892
893 if (mmap_iter != MemoryRegionMap::EndRegionLocked()) {
894 deep_bucket = GetInformationOfMemoryRegion(
895 mmap_iter, memory_residence_info_getter, deep_profile);
896 }
897 }
898 first = false;
899
900 uint64 last_address_of_unhooked;
901 // If the next mmap entry is away from the current VMA.
902 if (mmap_iter == MemoryRegionMap::EndRegionLocked() ||
903 mmap_iter->start_addr > vma_last_addr) {
904 last_address_of_unhooked = vma_last_addr;
905 } else {
906 last_address_of_unhooked = mmap_iter->start_addr - 1;
907 }
908
909 if (last_address_of_unhooked + 1 > cursor) {
910 RAW_CHECK(cursor >= vma_start_addr,
911 "Wrong calculation for unhooked");
912 RAW_CHECK(last_address_of_unhooked <= vma_last_addr,
913 "Wrong calculation for unhooked");
914 uint64 committed_size = unhooked_[type].Record(
915 memory_residence_info_getter,
916 cursor,
917 last_address_of_unhooked,
918 mmap_dump_buffer);
919 vma_subtotal += committed_size;
920 if (mmap_dump_buffer) {
921 mmap_dump_buffer->AppendString(" ", 0);
922 mmap_dump_buffer->AppendPtr(cursor, 0);
923 mmap_dump_buffer->AppendString(" - ", 0);
924 mmap_dump_buffer->AppendPtr(last_address_of_unhooked + 1, 0);
925 mmap_dump_buffer->AppendString(" unhooked ", 0);
926 mmap_dump_buffer->AppendInt64(committed_size, 0);
927 mmap_dump_buffer->AppendString(" / ", 0);
928 mmap_dump_buffer->AppendInt64(
929 last_address_of_unhooked - cursor + 1, 0);
930 mmap_dump_buffer->AppendString("\n", 0);
931 }
932 cursor = last_address_of_unhooked + 1;
933 }
934
935 if (mmap_iter != MemoryRegionMap::EndRegionLocked() &&
936 mmap_iter->start_addr <= vma_last_addr &&
937 mmap_dump_buffer) {
938 bool trailing = mmap_iter->start_addr < vma_start_addr;
939 bool continued = mmap_iter->end_addr - 1 > vma_last_addr;
940 uint64 partial_first_address, partial_last_address;
941 if (trailing)
942 partial_first_address = vma_start_addr;
943 else
944 partial_first_address = mmap_iter->start_addr;
945 if (continued)
946 partial_last_address = vma_last_addr;
947 else
948 partial_last_address = mmap_iter->end_addr - 1;
949 uint64 committed_size = 0;
950 if (memory_residence_info_getter)
951 committed_size = memory_residence_info_getter->CommittedSize(
952 partial_first_address, partial_last_address, mmap_dump_buffer);
953 vma_subtotal += committed_size;
954 mmap_dump_buffer->AppendString(trailing ? " (" : " ", 0);
955 mmap_dump_buffer->AppendPtr(mmap_iter->start_addr, 0);
956 mmap_dump_buffer->AppendString(trailing ? ")" : " ", 0);
957 mmap_dump_buffer->AppendString("-", 0);
958 mmap_dump_buffer->AppendString(continued ? "(" : " ", 0);
959 mmap_dump_buffer->AppendPtr(mmap_iter->end_addr, 0);
960 mmap_dump_buffer->AppendString(continued ? ")" : " ", 0);
961 mmap_dump_buffer->AppendString(" hooked ", 0);
962 mmap_dump_buffer->AppendInt64(committed_size, 0);
963 mmap_dump_buffer->AppendString(" / ", 0);
964 mmap_dump_buffer->AppendInt64(
965 partial_last_address - partial_first_address + 1, 0);
966 mmap_dump_buffer->AppendString(" @ ", 0);
967 if (deep_bucket != NULL) {
968 mmap_dump_buffer->AppendInt(deep_bucket->id, 0, false);
969 } else {
970 mmap_dump_buffer->AppendInt(0, 0, false);
971 }
972 mmap_dump_buffer->AppendString("\n", 0);
973 }
974 } while (mmap_iter != MemoryRegionMap::EndRegionLocked() &&
975 mmap_iter->end_addr - 1 <= vma_last_addr);
976 }
977
978 if (vma_total != vma_subtotal) {
979 char buffer[1024];
980 int written = procmaps_iter.FormatLine(buffer, sizeof(buffer),
981 vma_start_addr, vma_last_addr,
982 flags, offset, inode, filename, 0);
983 RAW_LOG(0, "[%d] Mismatched total in VMA %" PRId64 ":"
984 "%" PRId64 " (%" PRId64 ")",
985 getpid(), vma_total, vma_subtotal, vma_total - vma_subtotal);
986 RAW_LOG(0, "[%d] in %s", getpid(), buffer);
987 }
988 }
989
990 // TODO(dmikurube): Investigate and fix http://crbug.com/189114.
991 //
992 // The total committed memory usage in all_ (from /proc/<pid>/maps) is
993 // sometimes smaller than the sum of the committed mmap'ed addresses and
994 // unhooked regions. Within our observation, the difference was only 4KB
995 // in committed usage, and zero in reserved virtual addresses.
996 //
997 // A guess is that an uncommitted (but reserved) page may become committed
998 // during counting memory usage in the loop above.
999 //
1000 // The difference is accounted as "ABSENT" to investigate such cases.
1001 //
1002 // It will be fixed when http://crbug.com/245603 finishes (no double count).
1003
1004 RegionStats all_total;
1005 RegionStats unhooked_total;
1006 for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
1007 all_total.AddAnotherRegionStat(all_[i]);
1008 unhooked_total.AddAnotherRegionStat(unhooked_[i]);
1009 }
1010
1011 size_t absent_virtual = profiled_mmap_.virtual_bytes() +
1012 unhooked_total.virtual_bytes() -
1013 all_total.virtual_bytes();
1014 if (absent_virtual > 0)
1015 all_[ABSENT].AddToVirtualBytes(absent_virtual);
1016
1017 size_t absent_committed = profiled_mmap_.committed_bytes() +
1018 unhooked_total.committed_bytes() -
1019 all_total.committed_bytes();
1020 if (absent_committed > 0)
1021 all_[ABSENT].AddToCommittedBytes(absent_committed);
1022 }
1023
1024 void DeepHeapProfile::GlobalStats::SnapshotAllocations(
1025 DeepHeapProfile* deep_profile) {
1026 profiled_malloc_.Initialize();
1027
1028 deep_profile->heap_profile_->address_map_->Iterate(RecordAlloc, deep_profile);
1029 }
1030
1031 void DeepHeapProfile::GlobalStats::Unparse(TextBuffer* buffer) {
1032 RegionStats all_total;
1033 RegionStats unhooked_total;
1034 for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
1035 all_total.AddAnotherRegionStat(all_[i]);
1036 unhooked_total.AddAnotherRegionStat(unhooked_[i]);
1037 }
1038
1039 // "# total (%lu) %c= profiled-mmap (%lu) + nonprofiled-* (%lu)\n"
1040 buffer->AppendString("# total (", 0);
1041 buffer->AppendUnsignedLong(all_total.committed_bytes(), 0);
1042 buffer->AppendString(") ", 0);
1043 buffer->AppendChar(all_total.committed_bytes() ==
1044 profiled_mmap_.committed_bytes() +
1045 unhooked_total.committed_bytes() ? '=' : '!');
1046 buffer->AppendString("= profiled-mmap (", 0);
1047 buffer->AppendUnsignedLong(profiled_mmap_.committed_bytes(), 0);
1048 buffer->AppendString(") + nonprofiled-* (", 0);
1049 buffer->AppendUnsignedLong(unhooked_total.committed_bytes(), 0);
1050 buffer->AppendString(")\n", 0);
1051
1052 // " virtual committed"
1053 buffer->AppendString("", 26);
1054 buffer->AppendString(kVirtualLabel, 12);
1055 buffer->AppendChar(' ');
1056 buffer->AppendString(kCommittedLabel, 12);
1057 buffer->AppendString("\n", 0);
1058
1059 all_total.Unparse("total", buffer);
1060 all_[ABSENT].Unparse("absent", buffer);
1061 all_[FILE_EXEC].Unparse("file-exec", buffer);
1062 all_[FILE_NONEXEC].Unparse("file-nonexec", buffer);
1063 all_[ANONYMOUS].Unparse("anonymous", buffer);
1064 all_[STACK].Unparse("stack", buffer);
1065 all_[OTHER].Unparse("other", buffer);
1066 unhooked_total.Unparse("nonprofiled-total", buffer);
1067 unhooked_[ABSENT].Unparse("nonprofiled-absent", buffer);
1068 unhooked_[ANONYMOUS].Unparse("nonprofiled-anonymous", buffer);
1069 unhooked_[FILE_EXEC].Unparse("nonprofiled-file-exec", buffer);
1070 unhooked_[FILE_NONEXEC].Unparse("nonprofiled-file-nonexec", buffer);
1071 unhooked_[STACK].Unparse("nonprofiled-stack", buffer);
1072 unhooked_[OTHER].Unparse("nonprofiled-other", buffer);
1073 profiled_mmap_.Unparse("profiled-mmap", buffer);
1074 profiled_malloc_.Unparse("profiled-malloc", buffer);
1075 }
1076
1077 // static
1078 void DeepHeapProfile::GlobalStats::RecordAlloc(const void* pointer,
1079 AllocValue* alloc_value,
1080 DeepHeapProfile* deep_profile) {
1081 uint64 address = reinterpret_cast<uintptr_t>(pointer);
1082 size_t committed = deep_profile->memory_residence_info_getter_->CommittedSize(
1083 address, address + alloc_value->bytes - 1, NULL);
1084
1085 DeepBucket* deep_bucket = deep_profile->deep_table_.Lookup(
1086 alloc_value->bucket(),
1087 #if defined(TYPE_PROFILING)
1088 LookupType(pointer),
1089 #endif
1090 /* is_mmap */ false);
1091 deep_bucket->committed_size += committed;
1092 deep_profile->stats_.profiled_malloc_.AddToVirtualBytes(alloc_value->bytes);
1093 deep_profile->stats_.profiled_malloc_.AddToCommittedBytes(committed);
1094 }
1095
1096 DeepHeapProfile::DeepBucket*
1097 DeepHeapProfile::GlobalStats::GetInformationOfMemoryRegion(
1098 const MemoryRegionMap::RegionIterator& mmap_iter,
1099 const MemoryResidenceInfoGetterInterface* memory_residence_info_getter,
1100 DeepHeapProfile* deep_profile) {
1101 size_t committed = deep_profile->memory_residence_info_getter_->
1102 CommittedSize(mmap_iter->start_addr, mmap_iter->end_addr - 1, NULL);
1103
1104 // TODO(dmikurube): Store a reference to the bucket in region.
1105 Bucket* bucket = MemoryRegionMap::GetBucket(
1106 mmap_iter->call_stack_depth, mmap_iter->call_stack);
1107 DeepBucket* deep_bucket = NULL;
1108 if (bucket != NULL) {
1109 deep_bucket = deep_profile->deep_table_.Lookup(
1110 bucket,
1111 #if defined(TYPE_PROFILING)
1112 NULL, // No type information for memory regions by mmap.
1113 #endif
1114 /* is_mmap */ true);
1115 if (deep_bucket != NULL)
1116 deep_bucket->committed_size += committed;
1117 }
1118
1119 profiled_mmap_.AddToVirtualBytes(
1120 mmap_iter->end_addr - mmap_iter->start_addr);
1121 profiled_mmap_.AddToCommittedBytes(committed);
1122
1123 return deep_bucket;
1124 }
1125
1126 // static
1127 void DeepHeapProfile::WriteProcMaps(const char* prefix,
1128 char raw_buffer[],
1129 int buffer_size) {
1130 char filename[100];
1131 snprintf(filename, sizeof(filename),
1132 "%s.%05d.maps", prefix, static_cast<int>(getpid()));
1133
1134 RawFD fd = RawOpenForWriting(filename);
1135 RAW_DCHECK(fd != kIllegalRawFD, "");
1136
1137 int length;
1138 bool wrote_all;
1139 length = tcmalloc::FillProcSelfMaps(raw_buffer, buffer_size, &wrote_all);
1140 RAW_DCHECK(wrote_all, "");
1141 RAW_DCHECK(length <= buffer_size, "");
1142 RawWrite(fd, raw_buffer, length);
1143 RawClose(fd);
1144 }
1145 #else // USE_DEEP_HEAP_PROFILE
1146
1147 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
1148 const char* prefix,
1149 enum PageFrameType pageframe_type)
1150 : heap_profile_(heap_profile) {
1151 }
1152
1153 DeepHeapProfile::~DeepHeapProfile() {
1154 }
1155
1156 void DeepHeapProfile::DumpOrderedProfile(const char* reason,
1157 char raw_buffer[],
1158 int buffer_size,
1159 RawFD fd) {
1160 }
1161
1162 #endif // USE_DEEP_HEAP_PROFILE
1163