// Copyright (c) 2010, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This code writes out minidump files:
//   http://msdn.microsoft.com/en-us/library/ms680378(VS.85,loband).aspx
//
// Minidumps are a Microsoft format which Breakpad uses for recording crash
// dumps. This code has to run in a compromised environment (the address space
// may have received SIGSEGV), thus the following rules apply:
//   * You may not enter the dynamic linker. This means that we cannot call
//     any symbols in a shared library (including libc). Because of this we
//     replace libc functions in linux_libc_support.h.
//   * You may not call syscalls via the libc wrappers. This rule is a subset
//     of the first rule, but it bears repeating. We have direct wrappers
//     around the system calls in linux_syscall_support.h.
//   * You may not malloc. There's an alternative allocator in memory.h and
//     a canonical instance in the LinuxDumper object. We use the placement
//     new form to allocate objects, and we never delete them.
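//
// A minimal sketch of the allocation pattern used throughout this file
// (Foo and args are hypothetical):
//   PageAllocator allocator;
//   Foo* foo = new(allocator) Foo(args);  // never deleted
// See the use of ProcCpuInfoReader in WriteCPUInformation() below for a
// real instance of this pattern.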

#include "client/linux/handler/minidump_descriptor.h"
#include "client/linux/minidump_writer/minidump_writer.h"
#include "client/minidump_file_writer-inl.h"

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <link.h>
#include <stdio.h>
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/user.h>
#include <sys/utsname.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>

#include "client/linux/dump_writer_common/seccomp_unwinder.h"
#include "client/linux/dump_writer_common/thread_info.h"
#include "client/linux/dump_writer_common/ucontext_reader.h"
#include "client/linux/handler/exception_handler.h"
#include "client/linux/minidump_writer/cpu_set.h"
#include "client/linux/minidump_writer/line_reader.h"
#include "client/linux/minidump_writer/linux_dumper.h"
#include "client/linux/minidump_writer/linux_ptrace_dumper.h"
#include "client/linux/minidump_writer/proc_cpuinfo_reader.h"
#include "client/minidump_file_writer.h"
#include "common/linux/linux_libc_support.h"
#include "common/minidump_type_helper.h"
#include "google_breakpad/common/minidump_format.h"
#include "third_party/lss/linux_syscall_support.h"

namespace {

using google_breakpad::AppMemoryList;
using google_breakpad::ExceptionHandler;
using google_breakpad::CpuSet;
using google_breakpad::LineReader;
using google_breakpad::LinuxDumper;
using google_breakpad::LinuxPtraceDumper;
using google_breakpad::MDTypeHelper;
using google_breakpad::MappingEntry;
using google_breakpad::MappingInfo;
using google_breakpad::MappingList;
using google_breakpad::MinidumpFileWriter;
using google_breakpad::PageAllocator;
using google_breakpad::ProcCpuInfoReader;
using google_breakpad::RawContextCPU;
using google_breakpad::SeccompUnwinder;
using google_breakpad::ThreadInfo;
using google_breakpad::TypedMDRVA;
using google_breakpad::UContextReader;
using google_breakpad::UntypedMDRVA;
using google_breakpad::wasteful_vector;

typedef MDTypeHelper<sizeof(void*)>::MDRawDebug MDRawDebug;
typedef MDTypeHelper<sizeof(void*)>::MDRawLinkMap MDRawLinkMap;

class MinidumpWriter {
 public:
  // The following kLimit* constants are for when minidump_size_limit_ is set
  // and the minidump size might exceed it.
  //
  // Estimate for how big each thread's stack will be (in bytes).
  static const unsigned kLimitAverageThreadStackLength = 8 * 1024;
  // Number of threads whose stack size we don't want to limit. These base
  // threads will simply be the first N threads returned by the dumper
  // (although the crashing thread will never be limited). Threads beyond
  // this count are the extra threads.
  static const unsigned kLimitBaseThreadCount = 20;
  // Maximum stack size to dump for any extra thread (in bytes).
  static const unsigned kLimitMaxExtraThreadStackLen = 2 * 1024;
  // Make sure this number of additional bytes can fit in the minidump
  // (excluding the stack data).
  static const unsigned kLimitMinidumpFudgeFactor = 64 * 1024;
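  //
  // For example (illustrative numbers only): with a 256 KiB size limit and
  // 40 threads, the estimate 40 * 8 KiB + 64 KiB = 384 KiB exceeds the
  // limit, so threads beyond the first 20 get at most 2 KiB of stack each.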

  MinidumpWriter(const char* minidump_path,
                 int minidump_fd,
                 const ExceptionHandler::CrashContext* context,
                 const MappingList& mappings,
                 const AppMemoryList& appmem,
                 LinuxDumper* dumper)
      : fd_(minidump_fd),
        path_(minidump_path),
        ucontext_(context ? &context->context : NULL),
#if !defined(__ARM_EABI__) && !defined(__mips__)
        float_state_(context ? &context->float_state : NULL),
#endif
        dumper_(dumper),
        minidump_size_limit_(-1),
        memory_blocks_(dumper_->allocator()),
        mapping_list_(mappings),
        app_memory_list_(appmem) {
    // Assert there should be either a valid fd or a valid path, not both.
    assert(fd_ != -1 || minidump_path);
    assert(fd_ == -1 || !minidump_path);
  }

  bool Init() {
    if (!dumper_->Init())
      return false;

    if (fd_ != -1)
      minidump_writer_.SetFile(fd_);
    else if (!minidump_writer_.Open(path_))
      return false;

    return dumper_->ThreadsSuspend();
  }

  ~MinidumpWriter() {
    // Don't close the file descriptor when it's been provided explicitly.
    // Callers might still need to use it.
    if (fd_ == -1)
      minidump_writer_.Close();
    dumper_->ThreadsResume();
  }

  bool Dump() {
    // A minidump file contains a number of tagged streams. This is the number
    // of streams which we write.
    unsigned kNumWriters = 13;

    TypedMDRVA<MDRawHeader> header(&minidump_writer_);
    TypedMDRVA<MDRawDirectory> dir(&minidump_writer_);
    if (!header.Allocate())
      return false;
    if (!dir.AllocateArray(kNumWriters))
      return false;
    my_memset(header.get(), 0, sizeof(MDRawHeader));

    header.get()->signature = MD_HEADER_SIGNATURE;
    header.get()->version = MD_HEADER_VERSION;
    header.get()->time_date_stamp = time(NULL);
    header.get()->stream_count = kNumWriters;
    header.get()->stream_directory_rva = dir.position();

    unsigned dir_index = 0;
    MDRawDirectory dirent;

    if (!WriteThreadListStream(&dirent))
      return false;
    dir.CopyIndex(dir_index++, &dirent);

    if (!WriteMappings(&dirent))
      return false;
    dir.CopyIndex(dir_index++, &dirent);

    if (!WriteAppMemory())
      return false;

    if (!WriteMemoryListStream(&dirent))
      return false;
    dir.CopyIndex(dir_index++, &dirent);

    if (!WriteExceptionStream(&dirent))
      return false;
    dir.CopyIndex(dir_index++, &dirent);

    if (!WriteSystemInfoStream(&dirent))
      return false;
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_CPU_INFO;
    if (!WriteFile(&dirent.location, "/proc/cpuinfo"))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_PROC_STATUS;
    if (!WriteProcFile(&dirent.location, GetCrashThread(), "status"))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_LSB_RELEASE;
    if (!WriteFile(&dirent.location, "/etc/lsb-release"))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_CMD_LINE;
    if (!WriteProcFile(&dirent.location, GetCrashThread(), "cmdline"))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_ENVIRON;
    if (!WriteProcFile(&dirent.location, GetCrashThread(), "environ"))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_AUXV;
    if (!WriteProcFile(&dirent.location, GetCrashThread(), "auxv"))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_MAPS;
    if (!WriteProcFile(&dirent.location, GetCrashThread(), "maps"))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    dirent.stream_type = MD_LINUX_DSO_DEBUG;
    if (!WriteDSODebugStream(&dirent))
      NullifyDirectoryEntry(&dirent);
    dir.CopyIndex(dir_index++, &dirent);

    // If you add more directory entries, don't forget to update kNumWriters,
    // above.

    dumper_->ThreadsResume();
    return true;
  }

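  // Copy the stack of |thread|, starting from |stack_pointer|, into the
  // minidump, truncating it to |max_stack_len| bytes if |max_stack_len| is
  // non-negative. |*stack_copy| receives the in-process copy of the stack
  // data (or NULL if the stack could not be located), and, when stack data
  // is found, the dumped range is appended to memory_blocks_.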
  bool FillThreadStack(MDRawThread* thread, uintptr_t stack_pointer,
                       int max_stack_len, uint8_t** stack_copy) {
    *stack_copy = NULL;
    const void* stack;
    size_t stack_len;
    if (dumper_->GetStackInfo(&stack, &stack_len, stack_pointer)) {
      UntypedMDRVA memory(&minidump_writer_);
      if (max_stack_len >= 0 &&
          stack_len > static_cast<unsigned int>(max_stack_len)) {
        stack_len = max_stack_len;
      }
      if (!memory.Allocate(stack_len))
        return false;
      *stack_copy = reinterpret_cast<uint8_t*>(Alloc(stack_len));
      dumper_->CopyFromProcess(*stack_copy, thread->thread_id, stack,
                               stack_len);
      memory.Copy(*stack_copy, stack_len);
      thread->stack.start_of_memory_range =
          reinterpret_cast<uintptr_t>(stack);
      thread->stack.memory = memory.location();
      memory_blocks_.push_back(thread->stack);
    } else {
      thread->stack.start_of_memory_range = stack_pointer;
      thread->stack.memory.data_size = 0;
      thread->stack.memory.rva = minidump_writer_.position();
    }
    return true;
  }

  // Write information about the threads.
  bool WriteThreadListStream(MDRawDirectory* dirent) {
    const unsigned num_threads = dumper_->threads().size();

    TypedMDRVA<uint32_t> list(&minidump_writer_);
    if (!list.AllocateObjectAndArray(num_threads, sizeof(MDRawThread)))
      return false;

    dirent->stream_type = MD_THREAD_LIST_STREAM;
    dirent->location = list.location();

    *list.get() = num_threads;

    // If there's a minidump size limit, check if it might be exceeded. Since
    // most of the space is filled with stack data, just check against that.
    // If this is expected to exceed the limit, set extra_thread_stack_len
    // such that any thread beyond the first kLimitBaseThreadCount threads
    // will have only kLimitMaxExtraThreadStackLen bytes dumped.
    int extra_thread_stack_len = -1;  // default to no maximum
    if (minidump_size_limit_ >= 0) {
      const unsigned estimated_total_stack_size = num_threads *
          kLimitAverageThreadStackLength;
      const off_t estimated_minidump_size = minidump_writer_.position() +
          estimated_total_stack_size + kLimitMinidumpFudgeFactor;
      if (estimated_minidump_size > minidump_size_limit_)
        extra_thread_stack_len = kLimitMaxExtraThreadStackLen;
    }

    for (unsigned i = 0; i < num_threads; ++i) {
      MDRawThread thread;
      my_memset(&thread, 0, sizeof(thread));
      thread.thread_id = dumper_->threads()[i];

      // We have a different source of information for the crashing thread.
      // If we used the actual state of the thread we would find it running
      // in the signal handler with the alternative stack, which would be
      // deeply unhelpful.
      if (static_cast<pid_t>(thread.thread_id) == GetCrashThread() &&
          ucontext_ &&
          !dumper_->IsPostMortem()) {
        uint8_t* stack_copy;
        const uintptr_t stack_ptr = UContextReader::GetStackPointer(ucontext_);
        if (!FillThreadStack(&thread, stack_ptr, -1, &stack_copy))
          return false;

        // Copy 256 bytes around the crashing instruction pointer to the
        // minidump.
        const size_t kIPMemorySize = 256;
        uint64_t ip = UContextReader::GetInstructionPointer(ucontext_);
        // Bound it to the upper and lower bounds of the memory map
        // it's contained within. If it's not in mapped memory,
        // don't bother trying to write it.
        bool ip_is_mapped = false;
        MDMemoryDescriptor ip_memory_d;
        for (unsigned j = 0; j < dumper_->mappings().size(); ++j) {
          const MappingInfo& mapping = *dumper_->mappings()[j];
          if (ip >= mapping.start_addr &&
              ip < mapping.start_addr + mapping.size) {
            ip_is_mapped = true;
            // Try to get 128 bytes before and after the IP, but
            // settle for whatever's available.
            ip_memory_d.start_of_memory_range =
                std::max(mapping.start_addr,
                         uintptr_t(ip - (kIPMemorySize / 2)));
            uintptr_t end_of_range =
                std::min(uintptr_t(ip + (kIPMemorySize / 2)),
                         uintptr_t(mapping.start_addr + mapping.size));
            ip_memory_d.memory.data_size =
                end_of_range - ip_memory_d.start_of_memory_range;
            break;
          }
        }

        if (ip_is_mapped) {
          UntypedMDRVA ip_memory(&minidump_writer_);
          if (!ip_memory.Allocate(ip_memory_d.memory.data_size))
            return false;
          uint8_t* memory_copy =
              reinterpret_cast<uint8_t*>(Alloc(ip_memory_d.memory.data_size));
          dumper_->CopyFromProcess(
              memory_copy,
              thread.thread_id,
              reinterpret_cast<void*>(ip_memory_d.start_of_memory_range),
              ip_memory_d.memory.data_size);
          ip_memory.Copy(memory_copy, ip_memory_d.memory.data_size);
          ip_memory_d.memory = ip_memory.location();
          memory_blocks_.push_back(ip_memory_d);
        }

        TypedMDRVA<RawContextCPU> cpu(&minidump_writer_);
        if (!cpu.Allocate())
          return false;
        my_memset(cpu.get(), 0, sizeof(RawContextCPU));
#if !defined(__ARM_EABI__) && !defined(__mips__)
        UContextReader::FillCPUContext(cpu.get(), ucontext_, float_state_);
#else
        UContextReader::FillCPUContext(cpu.get(), ucontext_);
#endif
        if (stack_copy)
          SeccompUnwinder::PopSeccompStackFrame(cpu.get(), thread, stack_copy);
        thread.thread_context = cpu.location();
        crashing_thread_context_ = cpu.location();
      } else {
        ThreadInfo info;
        if (!dumper_->GetThreadInfoByIndex(i, &info))
          return false;

        uint8_t* stack_copy;
        int max_stack_len = -1;  // default to no maximum for this thread
        if (minidump_size_limit_ >= 0 && i >= kLimitBaseThreadCount)
          max_stack_len = extra_thread_stack_len;
        if (!FillThreadStack(&thread, info.stack_pointer, max_stack_len,
                             &stack_copy))
          return false;

        TypedMDRVA<RawContextCPU> cpu(&minidump_writer_);
        if (!cpu.Allocate())
          return false;
        my_memset(cpu.get(), 0, sizeof(RawContextCPU));
        info.FillCPUContext(cpu.get());
        if (stack_copy)
          SeccompUnwinder::PopSeccompStackFrame(cpu.get(), thread, stack_copy);
        thread.thread_context = cpu.location();
        if (dumper_->threads()[i] == GetCrashThread()) {
          crashing_thread_context_ = cpu.location();
          if (!dumper_->IsPostMortem()) {
            // This is the crashing thread of a live process, but
            // no context was provided, so set the crash address
            // while the instruction pointer is already here.
            dumper_->set_crash_address(info.GetInstructionPointer());
          }
        }
      }

      list.CopyIndexAfterObject(i, &thread, sizeof(thread));
    }

    return true;
  }

  // Write application-provided memory regions.
  bool WriteAppMemory() {
    for (AppMemoryList::const_iterator iter = app_memory_list_.begin();
         iter != app_memory_list_.end();
         ++iter) {
      uint8_t* data_copy =
          reinterpret_cast<uint8_t*>(dumper_->allocator()->Alloc(iter->length));
      dumper_->CopyFromProcess(data_copy, GetCrashThread(), iter->ptr,
                               iter->length);

      UntypedMDRVA memory(&minidump_writer_);
      if (!memory.Allocate(iter->length)) {
        return false;
      }
      memory.Copy(data_copy, iter->length);
      MDMemoryDescriptor desc;
      desc.start_of_memory_range = reinterpret_cast<uintptr_t>(iter->ptr);
      desc.memory = memory.location();
      memory_blocks_.push_back(desc);
    }

    return true;
  }

  static bool ShouldIncludeMapping(const MappingInfo& mapping) {
    if (mapping.name[0] == 0 ||  // only want modules with filenames.
        // Only want to include one mapping per shared lib.
        // Avoid filtering executable mappings.
        (mapping.offset != 0 && !mapping.exec) ||
        mapping.size < 4096) {  // too small to get a signature for.
      return false;
    }

    return true;
  }

  // If there is caller-provided information about this mapping
  // in the mapping_list_ list, return true. Otherwise, return false.
  bool HaveMappingInfo(const MappingInfo& mapping) {
    for (MappingList::const_iterator iter = mapping_list_.begin();
         iter != mapping_list_.end();
         ++iter) {
      // Ignore any mappings that are wholly contained within
      // mappings in the mapping_info_ list.
      if (mapping.start_addr >= iter->first.start_addr &&
          (mapping.start_addr + mapping.size) <=
          (iter->first.start_addr + iter->first.size)) {
        return true;
      }
    }
    return false;
  }

  // Write information about the mappings in effect. Because we are using the
  // minidump format, the information about the mappings is pretty limited.
  // Because of this, we also include the full, unparsed, /proc/$x/maps file
  // in another stream in the file.
  bool WriteMappings(MDRawDirectory* dirent) {
    const unsigned num_mappings = dumper_->mappings().size();
    unsigned num_output_mappings = mapping_list_.size();

    for (unsigned i = 0; i < dumper_->mappings().size(); ++i) {
      const MappingInfo& mapping = *dumper_->mappings()[i];
      if (ShouldIncludeMapping(mapping) && !HaveMappingInfo(mapping))
        num_output_mappings++;
    }

    TypedMDRVA<uint32_t> list(&minidump_writer_);
    if (num_output_mappings) {
      if (!list.AllocateObjectAndArray(num_output_mappings, MD_MODULE_SIZE))
        return false;
    } else {
      // Still create the module list stream, although it will have zero
      // modules.
      if (!list.Allocate())
        return false;
    }

    dirent->stream_type = MD_MODULE_LIST_STREAM;
    dirent->location = list.location();
    *list.get() = num_output_mappings;

    // First write all the mappings from the dumper.
    unsigned int j = 0;
    for (unsigned i = 0; i < num_mappings; ++i) {
      const MappingInfo& mapping = *dumper_->mappings()[i];
      if (!ShouldIncludeMapping(mapping) || HaveMappingInfo(mapping))
        continue;

      MDRawModule mod;
      if (!FillRawModule(mapping, true, i, mod, NULL))
        return false;
      list.CopyIndexAfterObject(j++, &mod, MD_MODULE_SIZE);
    }
    // Next write all the mappings provided by the caller.
    for (MappingList::const_iterator iter = mapping_list_.begin();
         iter != mapping_list_.end();
         ++iter) {
      MDRawModule mod;
      if (!FillRawModule(iter->first, false, 0, mod, iter->second))
        return false;
      list.CopyIndexAfterObject(j++, &mod, MD_MODULE_SIZE);
    }

    return true;
  }

  // Fill the MDRawModule |mod| with information about the provided
  // |mapping|. If |identifier| is non-NULL, use it instead of calculating
  // a file ID from the mapping.
  bool FillRawModule(const MappingInfo& mapping,
                     bool member,
                     unsigned int mapping_id,
                     MDRawModule& mod,
                     const uint8_t* identifier) {
    my_memset(&mod, 0, MD_MODULE_SIZE);

    mod.base_of_image = mapping.start_addr;
    mod.size_of_image = mapping.size;

    uint8_t cv_buf[MDCVInfoPDB70_minsize + NAME_MAX];
    uint8_t* cv_ptr = cv_buf;

    const uint32_t cv_signature = MD_CVINFOPDB70_SIGNATURE;
    my_memcpy(cv_ptr, &cv_signature, sizeof(cv_signature));
    cv_ptr += sizeof(cv_signature);
    uint8_t* signature = cv_ptr;
    cv_ptr += sizeof(MDGUID);
    if (identifier) {
      // GUID was provided by caller.
      my_memcpy(signature, identifier, sizeof(MDGUID));
    } else {
      // Note: ElfFileIdentifierForMapping() can manipulate the
      // |mapping.name|.
      dumper_->ElfFileIdentifierForMapping(mapping, member,
                                           mapping_id, signature);
    }
    my_memset(cv_ptr, 0, sizeof(uint32_t));  // Set age to 0 on Linux.
    cv_ptr += sizeof(uint32_t);

    char file_name[NAME_MAX];
    char file_path[NAME_MAX];
    LinuxDumper::GetMappingEffectiveNameAndPath(
        mapping, file_path, sizeof(file_path), file_name, sizeof(file_name));

    const size_t file_name_len = my_strlen(file_name);
    UntypedMDRVA cv(&minidump_writer_);
    if (!cv.Allocate(MDCVInfoPDB70_minsize + file_name_len + 1))
      return false;

    // Write pdb_file_name.
    my_memcpy(cv_ptr, file_name, file_name_len + 1);
    cv.Copy(cv_buf, MDCVInfoPDB70_minsize + file_name_len + 1);

    mod.cv_record = cv.location();

    MDLocationDescriptor ld;
    if (!minidump_writer_.WriteString(file_path, my_strlen(file_path), &ld))
      return false;
    mod.module_name_rva = ld.rva;
    return true;
  }

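  // Write the list of memory ranges collected in memory_blocks_ while the
  // earlier streams were written (thread stacks, memory around the
  // instruction pointer and application-provided regions).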
  bool WriteMemoryListStream(MDRawDirectory* dirent) {
    TypedMDRVA<uint32_t> list(&minidump_writer_);
    if (memory_blocks_.size()) {
      if (!list.AllocateObjectAndArray(memory_blocks_.size(),
                                       sizeof(MDMemoryDescriptor)))
        return false;
    } else {
      // Still create the memory list stream, although it will have zero
      // memory blocks.
      if (!list.Allocate())
        return false;
    }

    dirent->stream_type = MD_MEMORY_LIST_STREAM;
    dirent->location = list.location();

    *list.get() = memory_blocks_.size();

    for (size_t i = 0; i < memory_blocks_.size(); ++i) {
      list.CopyIndexAfterObject(i, &memory_blocks_[i],
                                sizeof(MDMemoryDescriptor));
    }
    return true;
  }

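  // Write the exception stream: which thread crashed, the signal number
  // (as the exception code), the fault address and a reference to the
  // crashing thread's CPU context.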
  bool WriteExceptionStream(MDRawDirectory* dirent) {
    TypedMDRVA<MDRawExceptionStream> exc(&minidump_writer_);
    if (!exc.Allocate())
      return false;
    my_memset(exc.get(), 0, sizeof(MDRawExceptionStream));

    dirent->stream_type = MD_EXCEPTION_STREAM;
    dirent->location = exc.location();

    exc.get()->thread_id = GetCrashThread();
    exc.get()->exception_record.exception_code = dumper_->crash_signal();
    exc.get()->exception_record.exception_address = dumper_->crash_address();
    exc.get()->thread_context = crashing_thread_context_;

    return true;
  }

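  // Write the system info stream, combining the CPU information parsed from
  // /proc/cpuinfo with the OS information from uname().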
  bool WriteSystemInfoStream(MDRawDirectory* dirent) {
    TypedMDRVA<MDRawSystemInfo> si(&minidump_writer_);
    if (!si.Allocate())
      return false;
    my_memset(si.get(), 0, sizeof(MDRawSystemInfo));

    dirent->stream_type = MD_SYSTEM_INFO_STREAM;
    dirent->location = si.location();

    WriteCPUInformation(si.get());
    WriteOSInformation(si.get());

    return true;
  }

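  // Write an MD_LINUX_DSO_DEBUG stream describing the shared objects loaded
  // into the crashed process, by locating its PT_DYNAMIC segment and walking
  // the r_debug/link_map list published by the dynamic linker.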
  bool WriteDSODebugStream(MDRawDirectory* dirent) {
    ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr) *>(dumper_->auxv()[AT_PHDR]);
    char* base;
    int phnum = dumper_->auxv()[AT_PHNUM];
    if (!phnum || !phdr)
      return false;

    // Assume the program base is at the beginning of the same page as the
    // PHDR.
    base = reinterpret_cast<char *>(reinterpret_cast<uintptr_t>(phdr) & ~0xfff);

    // Search for the program PT_DYNAMIC segment.
    ElfW(Addr) dyn_addr = 0;
    for (; phnum >= 0; phnum--, phdr++) {
      ElfW(Phdr) ph;
      if (!dumper_->CopyFromProcess(&ph, GetCrashThread(), phdr, sizeof(ph)))
        return false;

      // Adjust base address with the virtual address of the PT_LOAD segment
      // corresponding to offset 0.
      if (ph.p_type == PT_LOAD && ph.p_offset == 0) {
        base -= ph.p_vaddr;
      }
      if (ph.p_type == PT_DYNAMIC) {
        dyn_addr = ph.p_vaddr;
      }
    }
    if (!dyn_addr)
      return false;

    ElfW(Dyn) *dynamic = reinterpret_cast<ElfW(Dyn) *>(dyn_addr + base);

    // The dynamic linker makes information available that helps gdb find all
    // DSOs loaded into the program. If this information is indeed available,
    // dump it to a MD_LINUX_DSO_DEBUG stream.
    struct r_debug* r_debug = NULL;
    uint32_t dynamic_length = 0;

    for (int i = 0; ; ++i) {
      ElfW(Dyn) dyn;
      dynamic_length += sizeof(dyn);
      if (!dumper_->CopyFromProcess(&dyn, GetCrashThread(), dynamic + i,
                                    sizeof(dyn))) {
        return false;
      }

#ifdef __mips__
      if (dyn.d_tag == DT_MIPS_RLD_MAP) {
        r_debug = reinterpret_cast<struct r_debug*>(dyn.d_un.d_ptr);
        continue;
      }
#else
      if (dyn.d_tag == DT_DEBUG) {
        r_debug = reinterpret_cast<struct r_debug*>(dyn.d_un.d_ptr);
        continue;
      }
#endif
      else if (dyn.d_tag == DT_NULL) {
        break;
      }
    }

    // The "r_map" field of that r_debug struct contains a linked list of all
    // loaded DSOs.
    // Our list of DSOs potentially is different from the ones in the crashing
    // process. So, we have to be careful to never dereference pointers
    // directly. Instead, we use CopyFromProcess() everywhere.
    // See <link.h> for a more detailed discussion of how the dynamic loader
    // communicates with debuggers.

    // Count the number of loaded DSOs.
    int dso_count = 0;
    struct r_debug debug_entry;
    if (!dumper_->CopyFromProcess(&debug_entry, GetCrashThread(), r_debug,
                                  sizeof(debug_entry))) {
      return false;
    }
    for (struct link_map* ptr = debug_entry.r_map; ptr; ) {
      struct link_map map;
      if (!dumper_->CopyFromProcess(&map, GetCrashThread(), ptr, sizeof(map)))
        return false;

      ptr = map.l_next;
      dso_count++;
    }

    MDRVA linkmap_rva = minidump_writer_.kInvalidMDRVA;
    if (dso_count > 0) {
      // If we have at least one DSO, create an array of MDRawLinkMap
      // entries in the minidump file.
      TypedMDRVA<MDRawLinkMap> linkmap(&minidump_writer_);
      if (!linkmap.AllocateArray(dso_count))
        return false;
      linkmap_rva = linkmap.location().rva;
      int idx = 0;

      // Iterate over the DSOs and write their information to the minidump.
      for (struct link_map* ptr = debug_entry.r_map; ptr; ) {
        struct link_map map;
        if (!dumper_->CopyFromProcess(&map, GetCrashThread(), ptr,
                                      sizeof(map)))
          return false;

        ptr = map.l_next;
        char filename[257] = { 0 };
        if (map.l_name) {
          dumper_->CopyFromProcess(filename, GetCrashThread(), map.l_name,
                                   sizeof(filename) - 1);
        }
        MDLocationDescriptor location;
        if (!minidump_writer_.WriteString(filename, 0, &location))
          return false;
        MDRawLinkMap entry;
        entry.name = location.rva;
        entry.addr = map.l_addr;
        entry.ld = reinterpret_cast<uintptr_t>(map.l_ld);
        linkmap.CopyIndex(idx++, &entry);
      }
    }

    // Write the MD_LINUX_DSO_DEBUG record.
    TypedMDRVA<MDRawDebug> debug(&minidump_writer_);
    if (!debug.AllocateObjectAndArray(1, dynamic_length))
      return false;
    my_memset(debug.get(), 0, sizeof(MDRawDebug));
    dirent->stream_type = MD_LINUX_DSO_DEBUG;
    dirent->location = debug.location();

    debug.get()->version = debug_entry.r_version;
    debug.get()->map = linkmap_rva;
    debug.get()->dso_count = dso_count;
    debug.get()->brk = debug_entry.r_brk;
    debug.get()->ldbase = debug_entry.r_ldbase;
    debug.get()->dynamic = reinterpret_cast<uintptr_t>(dynamic);

    wasteful_vector<char> dso_debug_data(dumper_->allocator(), dynamic_length);
    // The passed-in size to the constructor (above) is only a hint.
    // Must call .resize() to do actual initialization of the elements.
    dso_debug_data.resize(dynamic_length);
    dumper_->CopyFromProcess(&dso_debug_data[0], GetCrashThread(), dynamic,
                             dynamic_length);
    debug.CopyIndexAfterObject(0, &dso_debug_data[0], dynamic_length);

    return true;
  }

  void set_minidump_size_limit(off_t limit) { minidump_size_limit_ = limit; }

 private:
  void* Alloc(unsigned bytes) {
    return dumper_->allocator()->Alloc(bytes);
  }

  pid_t GetCrashThread() const {
    return dumper_->crash_thread();
  }

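  // Mark a directory entry as empty so that a failed optional stream still
  // leaves a valid (zero-sized) entry in the stream directory.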
  void NullifyDirectoryEntry(MDRawDirectory* dirent) {
    dirent->stream_type = 0;
    dirent->location.data_size = 0;
    dirent->location.rva = 0;
  }

#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
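  // Parse /proc/cpuinfo to fill in the CPU architecture, the number of
  // processors and (on x86) the vendor id, model, stepping and family of
  // |sys_info|.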
  bool WriteCPUInformation(MDRawSystemInfo* sys_info) {
    char vendor_id[sizeof(sys_info->cpu.x86_cpu_info.vendor_id) + 1] = {0};
    static const char vendor_id_name[] = "vendor_id";

    struct CpuInfoEntry {
      const char* info_name;
      int value;
      bool found;
    } cpu_info_table[] = {
      { "processor", -1, false },
#if defined(__i386__) || defined(__x86_64__)
      { "model", 0, false },
      { "stepping", 0, false },
      { "cpu family", 0, false },
#endif
    };

    // processor_architecture should always be set; do this first.
    sys_info->processor_architecture =
#if defined(__mips__)
        MD_CPU_ARCHITECTURE_MIPS;
#elif defined(__i386__)
        MD_CPU_ARCHITECTURE_X86;
#else
        MD_CPU_ARCHITECTURE_AMD64;
#endif

    const int fd = sys_open("/proc/cpuinfo", O_RDONLY, 0);
    if (fd < 0)
      return false;

    {
      PageAllocator allocator;
      ProcCpuInfoReader* const reader = new(allocator) ProcCpuInfoReader(fd);
      const char* field;
      while (reader->GetNextField(&field)) {
        for (size_t i = 0;
             i < sizeof(cpu_info_table) / sizeof(cpu_info_table[0]);
             i++) {
          CpuInfoEntry* entry = &cpu_info_table[i];
          if (i > 0 && entry->found) {
            // Except for the 'processor' field, ignore repeated values.
            continue;
          }
          if (!my_strcmp(field, entry->info_name)) {
            size_t value_len;
            const char* value = reader->GetValueAndLen(&value_len);
            if (value_len == 0)
              continue;

            uintptr_t val;
            if (my_read_decimal_ptr(&val, value) == value)
              continue;

            entry->value = static_cast<int>(val);
            entry->found = true;
          }
        }

        // Special case for vendor_id.
        if (!my_strcmp(field, vendor_id_name)) {
          size_t value_len;
          const char* value = reader->GetValueAndLen(&value_len);
          if (value_len > 0)
            my_strlcpy(vendor_id, value, sizeof(vendor_id));
        }
      }
      sys_close(fd);
    }

    // Make sure we got everything we wanted.
    for (size_t i = 0;
         i < sizeof(cpu_info_table) / sizeof(cpu_info_table[0]);
         i++) {
      if (!cpu_info_table[i].found) {
        return false;
      }
    }
    // cpu_info_table[0] holds the last cpu id listed in /proc/cpuinfo;
    // assuming this is the highest id, change it to the number of CPUs
    // by adding one.
    cpu_info_table[0].value++;

    sys_info->number_of_processors = cpu_info_table[0].value;
#if defined(__i386__) || defined(__x86_64__)
    sys_info->processor_level = cpu_info_table[3].value;
    sys_info->processor_revision = cpu_info_table[1].value << 8 |
                                   cpu_info_table[2].value;
#endif

    if (vendor_id[0] != '\0') {
      my_memcpy(sys_info->cpu.x86_cpu_info.vendor_id, vendor_id,
                sizeof(sys_info->cpu.x86_cpu_info.vendor_id));
    }
    return true;
  }
#elif defined(__arm__) || defined(__aarch64__)
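  // Parse /proc/cpuinfo and the sysfs CPU topology files to fill in the ARM
  // or AArch64 architecture, the number of processors, the CPUID value and
  // the ELF hwcaps of |sys_info|.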
  bool WriteCPUInformation(MDRawSystemInfo* sys_info) {
    // The CPUID value is broken up in several entries in /proc/cpuinfo.
    // This table is used to rebuild it from the entries.
    const struct CpuIdEntry {
      const char* field;
      char format;
      char bit_lshift;
      char bit_length;
    } cpu_id_entries[] = {
      { "CPU implementer", 'x', 24, 8 },
      { "CPU variant", 'x', 20, 4 },
      { "CPU part", 'x', 4, 12 },
      { "CPU revision", 'd', 0, 4 },
    };

    // The ELF hwcaps are listed in the "Features" entry as textual tags.
    // This table is used to rebuild them.
    const struct CpuFeaturesEntry {
      const char* tag;
      uint32_t hwcaps;
    } cpu_features_entries[] = {
#if defined(__arm__)
      { "swp", MD_CPU_ARM_ELF_HWCAP_SWP },
      { "half", MD_CPU_ARM_ELF_HWCAP_HALF },
      { "thumb", MD_CPU_ARM_ELF_HWCAP_THUMB },
      { "26bit", MD_CPU_ARM_ELF_HWCAP_26BIT },
      { "fastmult", MD_CPU_ARM_ELF_HWCAP_FAST_MULT },
      { "fpa", MD_CPU_ARM_ELF_HWCAP_FPA },
      { "vfp", MD_CPU_ARM_ELF_HWCAP_VFP },
      { "edsp", MD_CPU_ARM_ELF_HWCAP_EDSP },
      { "java", MD_CPU_ARM_ELF_HWCAP_JAVA },
      { "iwmmxt", MD_CPU_ARM_ELF_HWCAP_IWMMXT },
      { "crunch", MD_CPU_ARM_ELF_HWCAP_CRUNCH },
      { "thumbee", MD_CPU_ARM_ELF_HWCAP_THUMBEE },
      { "neon", MD_CPU_ARM_ELF_HWCAP_NEON },
      { "vfpv3", MD_CPU_ARM_ELF_HWCAP_VFPv3 },
      { "vfpv3d16", MD_CPU_ARM_ELF_HWCAP_VFPv3D16 },
      { "tls", MD_CPU_ARM_ELF_HWCAP_TLS },
      { "vfpv4", MD_CPU_ARM_ELF_HWCAP_VFPv4 },
      { "idiva", MD_CPU_ARM_ELF_HWCAP_IDIVA },
      { "idivt", MD_CPU_ARM_ELF_HWCAP_IDIVT },
      { "idiv", MD_CPU_ARM_ELF_HWCAP_IDIVA | MD_CPU_ARM_ELF_HWCAP_IDIVT },
#elif defined(__aarch64__)
      // No hwcaps on aarch64.
#endif
    };

    // processor_architecture should always be set; do this first.
    sys_info->processor_architecture =
#if defined(__aarch64__)
        MD_CPU_ARCHITECTURE_ARM64;
#else
        MD_CPU_ARCHITECTURE_ARM;
#endif

    // /proc/cpuinfo is not readable under various sandboxed environments
    // (e.g. Android services with the android:isolatedProcess attribute).
    // Prepare for this by setting default values now, which will be
    // returned when this happens.
    //
    // Note: Bogus values are used to distinguish between failures (to
    // read /sys and /proc files) and really badly configured kernels.
    sys_info->number_of_processors = 0;
    sys_info->processor_level = 1U;  // There is no ARMv1.
    sys_info->processor_revision = 42;
    sys_info->cpu.arm_cpu_info.cpuid = 0;
    sys_info->cpu.arm_cpu_info.elf_hwcaps = 0;

    // Counting the number of CPUs involves parsing two sysfs files,
    // because the content of /proc/cpuinfo will only mirror the number
    // of 'online' cores, and thus will vary with time.
    // See http://www.kernel.org/doc/Documentation/cputopology.txt
    {
      CpuSet cpus_present;
      CpuSet cpus_possible;

      int fd = sys_open("/sys/devices/system/cpu/present", O_RDONLY, 0);
      if (fd >= 0) {
        cpus_present.ParseSysFile(fd);
        sys_close(fd);

        fd = sys_open("/sys/devices/system/cpu/possible", O_RDONLY, 0);
        if (fd >= 0) {
          cpus_possible.ParseSysFile(fd);
          sys_close(fd);

          cpus_present.IntersectWith(cpus_possible);
          int cpu_count = cpus_present.GetCount();
          if (cpu_count > 255)
            cpu_count = 255;
          sys_info->number_of_processors = static_cast<uint8_t>(cpu_count);
        }
      }
    }

    // Parse /proc/cpuinfo to reconstruct the CPUID value, as well
    // as the ELF hwcaps field. For the latter, it would be easier to
    // read /proc/self/auxv but unfortunately, this file is not always
    // readable from regular Android applications on later versions
    // (>= 4.1) of the Android platform.
    const int fd = sys_open("/proc/cpuinfo", O_RDONLY, 0);
    if (fd < 0) {
      // Do not return false here to allow the minidump generation
      // to happen properly.
      return true;
    }

    {
      PageAllocator allocator;
      ProcCpuInfoReader* const reader =
          new(allocator) ProcCpuInfoReader(fd);
      const char* field;
      while (reader->GetNextField(&field)) {
        for (size_t i = 0;
             i < sizeof(cpu_id_entries) / sizeof(cpu_id_entries[0]);
             ++i) {
          const CpuIdEntry* entry = &cpu_id_entries[i];
          if (my_strcmp(entry->field, field) != 0)
            continue;
          uintptr_t result = 0;
          const char* value = reader->GetValue();
          const char* p = value;
          if (value[0] == '0' && value[1] == 'x') {
            p = my_read_hex_ptr(&result, value + 2);
          } else if (entry->format == 'x') {
            p = my_read_hex_ptr(&result, value);
          } else {
            p = my_read_decimal_ptr(&result, value);
          }
          if (p == value)
            continue;

          result &= (1U << entry->bit_length) - 1;
          result <<= entry->bit_lshift;
          sys_info->cpu.arm_cpu_info.cpuid |=
              static_cast<uint32_t>(result);
        }
#if defined(__arm__)
        // Get the architecture version from the "Processor" field.
        // Note that it is also available in the "CPU architecture" field,
        // however, some existing kernels are misconfigured and will report
        // invalid values here (e.g. 6, while the CPU is ARMv7-A based).
        // The "Processor" field doesn't have this issue.
        if (!my_strcmp(field, "Processor")) {
          size_t value_len;
          const char* value = reader->GetValueAndLen(&value_len);
          // Expected format: <text> (v<level><endian>)
          // Where <text> is some text like "ARMv7 Processor rev 2"
          // and <level> is a decimal corresponding to the ARM
          // architecture number. <endian> is either 'l' or 'b'
          // and corresponds to the endianness; it is ignored here.
          while (value_len > 0 && my_isspace(value[value_len - 1]))
            value_len--;

          size_t nn = value_len;
          while (nn > 0 && value[nn - 1] != '(')
            nn--;
          if (nn > 0 && value[nn] == 'v') {
            uintptr_t arch_level = 5;
            my_read_decimal_ptr(&arch_level, value + nn + 1);
            sys_info->processor_level = static_cast<uint16_t>(arch_level);
          }
        }
#elif defined(__aarch64__)
        // The aarch64 architecture does not provide the architecture level
        // in the Processor field, so we instead check the "CPU architecture"
        // field.
        if (!my_strcmp(field, "CPU architecture")) {
          uintptr_t arch_level = 0;
          const char* value = reader->GetValue();
          const char* p = value;
          p = my_read_decimal_ptr(&arch_level, value);
          if (p == value)
            continue;
          sys_info->processor_level = static_cast<uint16_t>(arch_level);
        }
#endif
        // Rebuild the ELF hwcaps from the 'Features' field.
        if (!my_strcmp(field, "Features")) {
          size_t value_len;
          const char* value = reader->GetValueAndLen(&value_len);

          // Parse each space-separated tag.
          while (value_len > 0) {
            const char* tag = value;
            size_t tag_len = value_len;
            const char* p = my_strchr(tag, ' ');
            if (p != NULL) {
              tag_len = static_cast<size_t>(p - tag);
              value += tag_len + 1;
              value_len -= tag_len + 1;
            } else {
              tag_len = strlen(tag);
              value_len = 0;
            }
            for (size_t i = 0;
                 i < sizeof(cpu_features_entries) /
                     sizeof(cpu_features_entries[0]);
                 ++i) {
              const CpuFeaturesEntry* entry = &cpu_features_entries[i];
              if (tag_len == strlen(entry->tag) &&
                  !memcmp(tag, entry->tag, tag_len)) {
                sys_info->cpu.arm_cpu_info.elf_hwcaps |= entry->hwcaps;
                break;
              }
            }
          }
        }
      }
      sys_close(fd);
    }

    return true;
  }
#else
# error "Unsupported CPU"
#endif

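  // Copy the contents of |filename| into the minidump and return the
  // location of the data in |result|. Works on kernel seq_files (such as
  // /proc entries) that report a zero length from stat().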
  bool WriteFile(MDLocationDescriptor* result, const char* filename) {
    const int fd = sys_open(filename, O_RDONLY, 0);
    if (fd < 0)
      return false;

    // We can't stat the files because several of the files that we want to
    // read are kernel seqfiles, which always have a length of zero. So we
    // have to read as much as we can into a buffer.
    static const unsigned kBufSize = 1024 - 2 * sizeof(void*);
    struct Buffers {
      Buffers* next;
      size_t len;
      uint8_t data[kBufSize];
    } *buffers = reinterpret_cast<Buffers*>(Alloc(sizeof(Buffers)));
    buffers->next = NULL;
    buffers->len = 0;

    size_t total = 0;
    for (Buffers* bufptr = buffers;;) {
      ssize_t r;
      do {
        r = sys_read(fd, &bufptr->data[bufptr->len], kBufSize - bufptr->len);
      } while (r == -1 && errno == EINTR);

      if (r < 1)
        break;

      total += r;
      bufptr->len += r;
      if (bufptr->len == kBufSize) {
        bufptr->next = reinterpret_cast<Buffers*>(Alloc(sizeof(Buffers)));
        bufptr = bufptr->next;
        bufptr->next = NULL;
        bufptr->len = 0;
      }
    }
    sys_close(fd);

    if (!total)
      return false;

    UntypedMDRVA memory(&minidump_writer_);
    if (!memory.Allocate(total))
      return false;
    for (MDRVA pos = memory.position(); buffers; buffers = buffers->next) {
      // Check for the special case of a zero-length buffer. This should only
      // occur if a file's size happens to be a multiple of the buffer's
      // size, in which case the final sys_read() will have resulted in
      // zero bytes being read after the final buffer was just allocated.
      if (buffers->len == 0) {
        // This can only occur with the final buffer.
        assert(buffers->next == NULL);
        continue;
      }
      memory.Copy(pos, &buffers->data, buffers->len);
      pos += buffers->len;
    }
    *result = memory.location();
    return true;
  }

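  // Fill the platform id of |sys_info| and write a version string built from
  // the uname() fields into the minidump (referenced by csd_version_rva).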
  bool WriteOSInformation(MDRawSystemInfo* sys_info) {
#if defined(__ANDROID__)
    sys_info->platform_id = MD_OS_ANDROID;
#else
    sys_info->platform_id = MD_OS_LINUX;
#endif

    struct utsname uts;
    if (uname(&uts))
      return false;

    static const size_t buf_len = 512;
    char buf[buf_len] = {0};
    size_t space_left = buf_len - 1;
    const char* info_table[] = {
      uts.sysname,
      uts.release,
      uts.version,
      uts.machine,
      NULL
    };
    bool first_item = true;
    for (const char** cur_info = info_table; *cur_info; cur_info++) {
      static const char separator[] = " ";
      size_t separator_len = sizeof(separator) - 1;
      size_t info_len = my_strlen(*cur_info);
      if (info_len == 0)
        continue;

      if (space_left < info_len + (first_item ? 0 : separator_len))
        break;

      if (!first_item) {
        my_strlcat(buf, separator, sizeof(buf));
        space_left -= separator_len;
      }

      first_item = false;
      my_strlcat(buf, *cur_info, sizeof(buf));
      space_left -= info_len;
    }

    MDLocationDescriptor location;
    if (!minidump_writer_.WriteString(buf, 0, &location))
      return false;
    sys_info->csd_version_rva = location.rva;

    return true;
  }

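  // Write the contents of /proc/<pid>/<filename> into the minidump and
  // return its location in |result|.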
  bool WriteProcFile(MDLocationDescriptor* result, pid_t pid,
                     const char* filename) {
    char buf[NAME_MAX];
    if (!dumper_->BuildProcPath(buf, pid, filename))
      return false;
    return WriteFile(result, buf);
  }

  // Only one of the two member variables below should be set to a valid
  // value.
  const int fd_;  // File descriptor where the minidump should be written.
  const char* path_;  // Path to the file where the minidump should be
                      // written.

  const struct ucontext* const ucontext_;  // also from the signal handler
#if !defined(__ARM_EABI__) && !defined(__mips__)
  const google_breakpad::fpstate_t* const float_state_;  // ditto
#endif
  LinuxDumper* dumper_;
  MinidumpFileWriter minidump_writer_;
  off_t minidump_size_limit_;
  MDLocationDescriptor crashing_thread_context_;
  // Blocks of memory written to the dump. These are all currently
  // written while writing the thread list stream, but saved here
  // so a memory list stream can be written afterwards.
  wasteful_vector<MDMemoryDescriptor> memory_blocks_;
  // Additional information about some mappings provided by the caller.
  const MappingList& mapping_list_;
  // Additional memory regions to be included in the dump,
  // provided by the caller.
  const AppMemoryList& app_memory_list_;
};


bool WriteMinidumpImpl(const char* minidump_path,
                       int minidump_fd,
                       off_t minidump_size_limit,
                       pid_t crashing_process,
                       const void* blob, size_t blob_size,
                       const MappingList& mappings,
                       const AppMemoryList& appmem) {
  LinuxPtraceDumper dumper(crashing_process);
  const ExceptionHandler::CrashContext* context = NULL;
  if (blob) {
    if (blob_size != sizeof(ExceptionHandler::CrashContext))
      return false;
    context = reinterpret_cast<const ExceptionHandler::CrashContext*>(blob);
    dumper.set_crash_address(
        reinterpret_cast<uintptr_t>(context->siginfo.si_addr));
    dumper.set_crash_signal(context->siginfo.si_signo);
    dumper.set_crash_thread(context->tid);
  }
  MinidumpWriter writer(minidump_path, minidump_fd, context, mappings,
                        appmem, &dumper);
  // Set the desired limit for the file size of the minidump (-1 means no
  // limit).
  writer.set_minidump_size_limit(minidump_size_limit);
  if (!writer.Init())
    return false;
  return writer.Dump();
}

}  // namespace

namespace google_breakpad {

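// Example (illustrative only; the path and the child_pid, blob and blob_size
// variables are hypothetical): a monitoring process that received a
// CrashContext blob over the ExceptionHandler pipe can dump the crashed
// child with
//   google_breakpad::WriteMinidump("/tmp/child.dmp", child_pid,
//                                  blob, blob_size);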
bool WriteMinidump(const char* minidump_path, pid_t crashing_process,
                   const void* blob, size_t blob_size) {
  return WriteMinidumpImpl(minidump_path, -1, -1,
                           crashing_process, blob, blob_size,
                           MappingList(), AppMemoryList());
}

bool WriteMinidump(int minidump_fd, pid_t crashing_process,
                   const void* blob, size_t blob_size) {
  return WriteMinidumpImpl(NULL, minidump_fd, -1,
                           crashing_process, blob, blob_size,
                           MappingList(), AppMemoryList());
}

bool WriteMinidump(const char* minidump_path, pid_t process,
                   pid_t process_blamed_thread) {
  LinuxPtraceDumper dumper(process);
  // MinidumpWriter will set crash address.
  dumper.set_crash_signal(MD_EXCEPTION_CODE_LIN_DUMP_REQUESTED);
  dumper.set_crash_thread(process_blamed_thread);
  MinidumpWriter writer(minidump_path, -1, NULL, MappingList(),
                        AppMemoryList(), &dumper);
  if (!writer.Init())
    return false;
  return writer.Dump();
}

bool WriteMinidump(const char* minidump_path, pid_t crashing_process,
                   const void* blob, size_t blob_size,
                   const MappingList& mappings,
                   const AppMemoryList& appmem) {
  return WriteMinidumpImpl(minidump_path, -1, -1, crashing_process,
                           blob, blob_size,
                           mappings, appmem);
}

bool WriteMinidump(int minidump_fd, pid_t crashing_process,
                   const void* blob, size_t blob_size,
                   const MappingList& mappings,
                   const AppMemoryList& appmem) {
  return WriteMinidumpImpl(NULL, minidump_fd, -1, crashing_process,
                           blob, blob_size,
                           mappings, appmem);
}

bool WriteMinidump(const char* minidump_path, off_t minidump_size_limit,
                   pid_t crashing_process,
                   const void* blob, size_t blob_size,
                   const MappingList& mappings,
                   const AppMemoryList& appmem) {
  return WriteMinidumpImpl(minidump_path, -1, minidump_size_limit,
                           crashing_process, blob, blob_size,
                           mappings, appmem);
}

bool WriteMinidump(int minidump_fd, off_t minidump_size_limit,
                   pid_t crashing_process,
                   const void* blob, size_t blob_size,
                   const MappingList& mappings,
                   const AppMemoryList& appmem) {
  return WriteMinidumpImpl(NULL, minidump_fd, minidump_size_limit,
                           crashing_process, blob, blob_size,
                           mappings, appmem);
}

bool WriteMinidump(const char* filename,
                   const MappingList& mappings,
                   const AppMemoryList& appmem,
                   LinuxDumper* dumper) {
  MinidumpWriter writer(filename, -1, NULL, mappings, appmem, dumper);
  if (!writer.Init())
    return false;
  return writer.Dump();
}

}  // namespace google_breakpad