1 // Copyright (c) 2006, Google Inc.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
13 // distribution.
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 #include <algorithm>
31 #include <cstdio>
#include <cassert>
32
33 #include <mach/host_info.h>
34 #include <mach/machine.h>
35 #include <mach/vm_statistics.h>
36 #include <mach-o/dyld.h>
37 #include <mach-o/loader.h>
38 #include <sys/sysctl.h>
39 #include <sys/resource.h>
40
41 #include <CoreFoundation/CoreFoundation.h>
42
43 #include "client/mac/handler/minidump_generator.h"
44
45 #if defined(HAS_ARM_SUPPORT) || defined(HAS_ARM64_SUPPORT)
46 #include <mach/arm/thread_status.h>
47 #endif
48 #ifdef HAS_PPC_SUPPORT
49 #include <mach/ppc/thread_status.h>
50 #endif
51 #ifdef HAS_X86_SUPPORT
52 #include <mach/i386/thread_status.h>
53 #endif
54
55 #include "client/minidump_file_writer-inl.h"
56 #include "common/mac/file_id.h"
57 #include "common/mac/macho_id.h"
58 #include "common/mac/string_utilities.h"
59
60 using MacStringUtils::ConvertToString;
61 using MacStringUtils::IntegerValueAtIndex;
62
63 namespace google_breakpad {
64
65 #if defined(__LP64__) && __LP64__
66 #define LC_SEGMENT_ARCH LC_SEGMENT_64
67 #else
68 #define LC_SEGMENT_ARCH LC_SEGMENT
69 #endif
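// Note: WriteModuleStream below walks in-process Mach-O load commands using
// this alias, so a 64-bit build matches LC_SEGMENT_64 segments and a 32-bit
// build matches LC_SEGMENT.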
70
71 // constructor when generating from within the crashed process
72 MinidumpGenerator::MinidumpGenerator()
73 : writer_(),
74 exception_type_(0),
75 exception_code_(0),
76 exception_subcode_(0),
77 exception_thread_(0),
78 crashing_task_(mach_task_self()),
79 handler_thread_(mach_thread_self()),
80 cpu_type_(DynamicImages::GetNativeCPUType()),
81 task_context_(NULL),
82 dynamic_images_(NULL),
83 memory_blocks_(&allocator_) {
84 GatherSystemInformation();
85 }
86
87 // constructor when generating from a different process than the
88 // crashed process
89 MinidumpGenerator::MinidumpGenerator(mach_port_t crashing_task,
90 mach_port_t handler_thread)
91 : writer_(),
92 exception_type_(0),
93 exception_code_(0),
94 exception_subcode_(0),
95 exception_thread_(0),
96 crashing_task_(crashing_task),
97 handler_thread_(handler_thread),
98 cpu_type_(DynamicImages::GetNativeCPUType()),
99 task_context_(NULL),
100 dynamic_images_(NULL),
101 memory_blocks_(&allocator_) {
102 if (crashing_task != mach_task_self()) {
103 dynamic_images_ = new DynamicImages(crashing_task_);
104 cpu_type_ = dynamic_images_->GetCPUType();
105 } else {
106 dynamic_images_ = NULL;
107 cpu_type_ = DynamicImages::GetNativeCPUType();
108 }
109
110 GatherSystemInformation();
111 }
112
113 MinidumpGenerator::~MinidumpGenerator() {
114 delete dynamic_images_;
115 }
116
117 char MinidumpGenerator::build_string_[16];
118 int MinidumpGenerator::os_major_version_ = 0;
119 int MinidumpGenerator::os_minor_version_ = 0;
120 int MinidumpGenerator::os_build_number_ = 0;
121
122 // static
123 void MinidumpGenerator::GatherSystemInformation() {
124 // If this is non-zero, then we've already gathered the information
125 if (os_major_version_)
126 return;
127
128 // This code extracts the version and build information from the OS
129 CFStringRef vers_path =
130 CFSTR("/System/Library/CoreServices/SystemVersion.plist");
131 CFURLRef sys_vers =
132 CFURLCreateWithFileSystemPath(NULL,
133 vers_path,
134 kCFURLPOSIXPathStyle,
135 false);
136 CFDataRef data;
137 SInt32 error;
138 CFURLCreateDataAndPropertiesFromResource(NULL, sys_vers, &data, NULL, NULL,
139 &error);
140
141 if (!data) {
142 CFRelease(sys_vers);
143 return;
144 }
145
146 CFDictionaryRef list = static_cast<CFDictionaryRef>
147 (CFPropertyListCreateFromXMLData(NULL, data, kCFPropertyListImmutable,
148 NULL));
149 if (!list) {
150 CFRelease(sys_vers);
151 CFRelease(data);
152 return;
153 }
154
155 CFStringRef build_version = static_cast<CFStringRef>
156 (CFDictionaryGetValue(list, CFSTR("ProductBuildVersion")));
157 CFStringRef product_version = static_cast<CFStringRef>
158 (CFDictionaryGetValue(list, CFSTR("ProductVersion")));
159 string build_str = ConvertToString(build_version);
160 string product_str = ConvertToString(product_version);
161
162 CFRelease(list);
163 CFRelease(sys_vers);
164 CFRelease(data);
165
166 strlcpy(build_string_, build_str.c_str(), sizeof(build_string_));
167
168 // Parse the string that looks like "10.4.8"
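// For example, a ProductVersion of "10.4.8" yields os_major_version_ = 10,
// os_minor_version_ = 4 and os_build_number_ = 8; the alphanumeric
// ProductBuildVersion (e.g. "8L127") is kept verbatim in build_string_.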
169 os_major_version_ = IntegerValueAtIndex(product_str, 0);
170 os_minor_version_ = IntegerValueAtIndex(product_str, 1);
171 os_build_number_ = IntegerValueAtIndex(product_str, 2);
172 }
173
174 void MinidumpGenerator::SetTaskContext(breakpad_ucontext_t *task_context) {
175 task_context_ = task_context;
176 }
177
178 string MinidumpGenerator::UniqueNameInDirectory(const string &dir,
179 string *unique_name) {
180 CFUUIDRef uuid = CFUUIDCreate(NULL);
181 CFStringRef uuid_cfstr = CFUUIDCreateString(NULL, uuid);
182 CFRelease(uuid);
183 string file_name(ConvertToString(uuid_cfstr));
184 CFRelease(uuid_cfstr);
185 string path(dir);
186
187 // Ensure that the directory (if non-empty) has a trailing slash so that
188 // we can append the file name and have a valid pathname.
189 if (!dir.empty()) {
190 if (dir.at(dir.size() - 1) != '/')
191 path.append(1, '/');
192 }
193
194 path.append(file_name);
195 path.append(".dmp");
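// Illustrative result (hypothetical UUID): dir "/tmp/dumps" and a generated
// name of "01234567-89AB-CDEF-0123-456789ABCDEF" produce the path
// "/tmp/dumps/01234567-89AB-CDEF-0123-456789ABCDEF.dmp".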
196
197 if (unique_name)
198 *unique_name = file_name;
199
200 return path;
201 }
202
203 bool MinidumpGenerator::Write(const char *path) {
204 WriteStreamFN writers[] = {
205 &MinidumpGenerator::WriteThreadListStream,
206 &MinidumpGenerator::WriteMemoryListStream,
207 &MinidumpGenerator::WriteSystemInfoStream,
208 &MinidumpGenerator::WriteModuleListStream,
209 &MinidumpGenerator::WriteMiscInfoStream,
210 &MinidumpGenerator::WriteBreakpadInfoStream,
211 // Exception stream needs to be the last entry in this array as it may
212 // be omitted in the case where the minidump is written without an
213 // exception.
214 &MinidumpGenerator::WriteExceptionStream,
215 };
216 bool result = false;
217
218 // If opening was successful, create the header, directory, and call each
219 // writer. The destructor for the TypedMDRVAs will cause the data to be
220 // flushed. The destructor for the MinidumpFileWriter will close the file.
221 if (writer_.Open(path)) {
222 TypedMDRVA<MDRawHeader> header(&writer_);
223 TypedMDRVA<MDRawDirectory> dir(&writer_);
224
225 if (!header.Allocate())
226 return false;
227
228 int writer_count = static_cast<int>(sizeof(writers) / sizeof(writers[0]));
229
230 // If we don't have exception information, don't write out the
231 // exception stream
232 if (!exception_thread_ && !exception_type_)
233 --writer_count;
234
235 // Add space for all writers
236 if (!dir.AllocateArray(writer_count))
237 return false;
238
239 MDRawHeader *header_ptr = header.get();
240 header_ptr->signature = MD_HEADER_SIGNATURE;
241 header_ptr->version = MD_HEADER_VERSION;
242 time(reinterpret_cast<time_t *>(&(header_ptr->time_date_stamp)));
243 header_ptr->stream_count = writer_count;
244 header_ptr->stream_directory_rva = dir.position();
245
246 MDRawDirectory local_dir;
247 result = true;
248 for (int i = 0; (result) && (i < writer_count); ++i) {
249 result = (this->*writers[i])(&local_dir);
250
251 if (result)
252 dir.CopyIndex(i, &local_dir);
253 }
254 }
255 return result;
256 }
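// Illustrative usage (a sketch, not part of this file), assuming the public
// declarations in minidump_generator.h:
//
//   MinidumpGenerator generator;
//   string dump_id;
//   string dump_path = generator.UniqueNameInDirectory("/tmp/dumps", &dump_id);
//   bool written = generator.Write(dump_path.c_str());
//
// Out-of-process generation instead uses the constructor taking the crashing
// task and handler thread ports before calling Write().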
257
258 size_t MinidumpGenerator::CalculateStackSize(mach_vm_address_t start_addr) {
259 mach_vm_address_t stack_region_base = start_addr;
260 mach_vm_size_t stack_region_size;
261 natural_t nesting_level = 0;
262 vm_region_submap_info_64 submap_info;
263 mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
264
265 vm_region_recurse_info_t region_info;
266 region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info);
267
268 if (start_addr == 0) {
269 return 0;
270 }
271
272 kern_return_t result =
273 mach_vm_region_recurse(crashing_task_, &stack_region_base,
274 &stack_region_size, &nesting_level,
275 region_info, &info_count);
276
277 if (result != KERN_SUCCESS || start_addr < stack_region_base) {
278 // Failure or stack corruption, since mach_vm_region had to go
279 // higher in the process address space to find a valid region.
280 return 0;
281 }
282
283 unsigned int tag = submap_info.user_tag;
284
285 // If the user tag is VM_MEMORY_STACK, look for more readable regions with
286 // the same tag placed immediately above the computed stack region. Under
287 // some circumstances, the stack for thread 0 winds up broken up into
288 // multiple distinct abutting regions. This can happen for several reasons,
289 // including user code that calls setrlimit(RLIMIT_STACK, ...) or changes
290 // the access on stack pages by calling mprotect.
291 if (tag == VM_MEMORY_STACK) {
292 while (true) {
293 mach_vm_address_t next_region_base = stack_region_base +
294 stack_region_size;
295 mach_vm_address_t proposed_next_region_base = next_region_base;
296 mach_vm_size_t next_region_size;
297 nesting_level = 0;
298 info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
299 result = mach_vm_region_recurse(crashing_task_, &next_region_base,
300 &next_region_size, &nesting_level,
301 region_info, &info_count);
302 if (result != KERN_SUCCESS ||
303 next_region_base != proposed_next_region_base ||
304 submap_info.user_tag != tag ||
305 (submap_info.protection & VM_PROT_READ) == 0) {
306 break;
307 }
308
309 stack_region_size += next_region_size;
310 }
311 }
312
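// Illustrative arithmetic (hypothetical addresses): with stack_region_base
// 0x7fff5f3f0000, stack_region_size 0x100000 and start_addr (the thread's
// stack pointer) 0x7fff5f4e0000, the expression below returns 0x10000 bytes.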
313 return stack_region_base + stack_region_size - start_addr;
314 }
315
316 bool MinidumpGenerator::WriteStackFromStartAddress(
317 mach_vm_address_t start_addr,
318 MDMemoryDescriptor *stack_location) {
319 UntypedMDRVA memory(&writer_);
320
321 bool result = false;
322 size_t size = CalculateStackSize(start_addr);
323
324 if (size == 0) {
325 // In some situations the stack address for the thread can come back 0.
326 // In these cases we skip over the threads in question and stuff the
327 // stack with a clearly borked value.
328 start_addr = 0xDEADBEEF;
329 size = 16;
330 if (!memory.Allocate(size))
331 return false;
332
333 unsigned long long dummy_stack[2]; // Fill dummy stack with 16 bytes of
334 // junk.
335 dummy_stack[0] = 0xDEADBEEF;
336 dummy_stack[1] = 0xDEADBEEF;
337
338 result = memory.Copy(dummy_stack, size);
339 } else {
340
341 if (!memory.Allocate(size))
342 return false;
343
344 if (dynamic_images_) {
345 vector<uint8_t> stack_memory;
346 if (ReadTaskMemory(crashing_task_,
347 start_addr,
348 size,
349 stack_memory) != KERN_SUCCESS) {
350 return false;
351 }
352
353 result = memory.Copy(&stack_memory[0], size);
354 } else {
355 result = memory.Copy(reinterpret_cast<const void *>(start_addr), size);
356 }
357 }
358
359 stack_location->start_of_memory_range = start_addr;
360 stack_location->memory = memory.location();
361
362 return result;
363 }
364
365 bool MinidumpGenerator::WriteStack(breakpad_thread_state_data_t state,
366 MDMemoryDescriptor *stack_location) {
367 switch (cpu_type_) {
368 #ifdef HAS_ARM_SUPPORT
369 case CPU_TYPE_ARM:
370 return WriteStackARM(state, stack_location);
371 #endif
372 #ifdef HAS_ARM64_SUPPORT
373 case CPU_TYPE_ARM64:
374 return WriteStackARM64(state, stack_location);
375 #endif
376 #ifdef HAS_PPC_SUPPORT
377 case CPU_TYPE_POWERPC:
378 return WriteStackPPC(state, stack_location);
379 case CPU_TYPE_POWERPC64:
380 return WriteStackPPC64(state, stack_location);
381 #endif
382 #ifdef HAS_X86_SUPPORT
383 case CPU_TYPE_I386:
384 return WriteStackX86(state, stack_location);
385 case CPU_TYPE_X86_64:
386 return WriteStackX86_64(state, stack_location);
387 #endif
388 default:
389 return false;
390 }
391 }
392
393 bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
394 MDLocationDescriptor *register_location) {
395 switch (cpu_type_) {
396 #ifdef HAS_ARM_SUPPORT
397 case CPU_TYPE_ARM:
398 return WriteContextARM(state, register_location);
399 #endif
400 #ifdef HAS_ARM64_SUPPORT
401 case CPU_TYPE_ARM64:
402 return WriteContextARM64(state, register_location);
403 #endif
404 #ifdef HAS_PPC_SUPPORT
405 case CPU_TYPE_POWERPC:
406 return WriteContextPPC(state, register_location);
407 case CPU_TYPE_POWERPC64:
408 return WriteContextPPC64(state, register_location);
409 #endif
410 #ifdef HAS_X86_SUPPORT
411 case CPU_TYPE_I386:
412 return WriteContextX86(state, register_location);
413 case CPU_TYPE_X86_64:
414 return WriteContextX86_64(state, register_location);
415 #endif
416 default:
417 return false;
418 }
419 }
420
421 uint64_t MinidumpGenerator::CurrentPCForStack(
422 breakpad_thread_state_data_t state) {
423 switch (cpu_type_) {
424 #ifdef HAS_ARM_SUPPORT
425 case CPU_TYPE_ARM:
426 return CurrentPCForStackARM(state);
427 #endif
428 #ifdef HAS_ARM64_SUPPORT
429 case CPU_TYPE_ARM64:
430 return CurrentPCForStackARM64(state);
431 #endif
432 #ifdef HAS_PPC_SUPPORT
433 case CPU_TYPE_POWERPC:
434 return CurrentPCForStackPPC(state);
435 case CPU_TYPE_POWERPC64:
436 return CurrentPCForStackPPC64(state);
437 #endif
438 #ifdef HAS_X86_SUPPORT
439 case CPU_TYPE_I386:
440 return CurrentPCForStackX86(state);
441 case CPU_TYPE_X86_64:
442 return CurrentPCForStackX86_64(state);
443 #endif
444 default:
445 assert(0 && "Unknown CPU type!");
446 return 0;
447 }
448 }
449
450 #ifdef HAS_ARM_SUPPORT
451 bool MinidumpGenerator::WriteStackARM(breakpad_thread_state_data_t state,
452 MDMemoryDescriptor *stack_location) {
453 arm_thread_state_t *machine_state =
454 reinterpret_cast<arm_thread_state_t *>(state);
455 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, sp);
456 return WriteStackFromStartAddress(start_addr, stack_location);
457 }
458
459 uint64_t
460 MinidumpGenerator::CurrentPCForStackARM(breakpad_thread_state_data_t state) {
461 arm_thread_state_t *machine_state =
462 reinterpret_cast<arm_thread_state_t *>(state);
463
464 return REGISTER_FROM_THREADSTATE(machine_state, pc);
465 }
466
467 bool MinidumpGenerator::WriteContextARM(breakpad_thread_state_data_t state,
468 MDLocationDescriptor *register_location)
469 {
470 TypedMDRVA<MDRawContextARM> context(&writer_);
471 arm_thread_state_t *machine_state =
472 reinterpret_cast<arm_thread_state_t *>(state);
473
474 if (!context.Allocate())
475 return false;
476
477 *register_location = context.location();
478 MDRawContextARM *context_ptr = context.get();
479 context_ptr->context_flags = MD_CONTEXT_ARM_FULL;
480
481 #define AddGPR(a) context_ptr->iregs[a] = REGISTER_FROM_THREADSTATE(machine_state, r[a])
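// For instance, AddGPR(3); expands to
//   context_ptr->iregs[3] = REGISTER_FROM_THREADSTATE(machine_state, r[3]);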
482
483 context_ptr->iregs[13] = REGISTER_FROM_THREADSTATE(machine_state, sp);
484 context_ptr->iregs[14] = REGISTER_FROM_THREADSTATE(machine_state, lr);
485 context_ptr->iregs[15] = REGISTER_FROM_THREADSTATE(machine_state, pc);
486 context_ptr->cpsr = REGISTER_FROM_THREADSTATE(machine_state, cpsr);
487
488 AddGPR(0);
489 AddGPR(1);
490 AddGPR(2);
491 AddGPR(3);
492 AddGPR(4);
493 AddGPR(5);
494 AddGPR(6);
495 AddGPR(7);
496 AddGPR(8);
497 AddGPR(9);
498 AddGPR(10);
499 AddGPR(11);
500 AddGPR(12);
501 #undef AddGPR
502
503 return true;
504 }
505 #endif
506
507 #ifdef HAS_ARM64_SUPPORT
508 bool MinidumpGenerator::WriteStackARM64(breakpad_thread_state_data_t state,
509 MDMemoryDescriptor *stack_location) {
510 arm_thread_state64_t *machine_state =
511 reinterpret_cast<arm_thread_state64_t *>(state);
512 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, sp);
513 return WriteStackFromStartAddress(start_addr, stack_location);
514 }
515
516 uint64_t
517 MinidumpGenerator::CurrentPCForStackARM64(breakpad_thread_state_data_t state) {
518 arm_thread_state64_t *machine_state =
519 reinterpret_cast<arm_thread_state64_t *>(state);
520
521 return REGISTER_FROM_THREADSTATE(machine_state, pc);
522 }
523
524 bool
525 MinidumpGenerator::WriteContextARM64(breakpad_thread_state_data_t state,
526 MDLocationDescriptor *register_location)
527 {
528 TypedMDRVA<MDRawContextARM64> context(&writer_);
529 arm_thread_state64_t *machine_state =
530 reinterpret_cast<arm_thread_state64_t *>(state);
531
532 if (!context.Allocate())
533 return false;
534
535 *register_location = context.location();
536 MDRawContextARM64 *context_ptr = context.get();
537 context_ptr->context_flags = MD_CONTEXT_ARM64_FULL;
538
539 #define AddGPR(a) context_ptr->iregs[a] = \
540 REGISTER_FROM_THREADSTATE(machine_state, x[a])
541
542 context_ptr->iregs[29] = REGISTER_FROM_THREADSTATE(machine_state, fp);
543 context_ptr->iregs[30] = REGISTER_FROM_THREADSTATE(machine_state, lr);
544 context_ptr->iregs[31] = REGISTER_FROM_THREADSTATE(machine_state, sp);
545 context_ptr->iregs[32] = REGISTER_FROM_THREADSTATE(machine_state, pc);
546 context_ptr->cpsr = REGISTER_FROM_THREADSTATE(machine_state, cpsr);
547
548 AddGPR(0);
549 AddGPR(1);
550 AddGPR(2);
551 AddGPR(3);
552 AddGPR(4);
553 AddGPR(5);
554 AddGPR(6);
555 AddGPR(7);
556 AddGPR(8);
557 AddGPR(9);
558 AddGPR(10);
559 AddGPR(11);
560 AddGPR(12);
561 AddGPR(13);
562 AddGPR(14);
563 AddGPR(15);
564 AddGPR(16);
565 AddGPR(17);
566 AddGPR(18);
567 AddGPR(19);
568 AddGPR(20);
569 AddGPR(21);
570 AddGPR(22);
571 AddGPR(23);
572 AddGPR(24);
573 AddGPR(25);
574 AddGPR(26);
575 AddGPR(27);
576 AddGPR(28);
577 #undef AddGPR
578
579 return true;
580 }
581 #endif
582
583 #ifdef HAS_PPC_SUPPORT
584 bool MinidumpGenerator::WriteStackPPC(breakpad_thread_state_data_t state,
585 MDMemoryDescriptor *stack_location) {
586 ppc_thread_state_t *machine_state =
587 reinterpret_cast<ppc_thread_state_t *>(state);
588 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
589 return WriteStackFromStartAddress(start_addr, stack_location);
590 }
591
592 bool MinidumpGenerator::WriteStackPPC64(breakpad_thread_state_data_t state,
593 MDMemoryDescriptor *stack_location) {
594 ppc_thread_state64_t *machine_state =
595 reinterpret_cast<ppc_thread_state64_t *>(state);
596 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
597 return WriteStackFromStartAddress(start_addr, stack_location);
598 }
599
600 uint64_t
601 MinidumpGenerator::CurrentPCForStackPPC(breakpad_thread_state_data_t state) {
602 ppc_thread_state_t *machine_state =
603 reinterpret_cast<ppc_thread_state_t *>(state);
604
605 return REGISTER_FROM_THREADSTATE(machine_state, srr0);
606 }
607
608 uint64_t
609 MinidumpGenerator::CurrentPCForStackPPC64(breakpad_thread_state_data_t state) {
610 ppc_thread_state64_t *machine_state =
611 reinterpret_cast<ppc_thread_state64_t *>(state);
612
613 return REGISTER_FROM_THREADSTATE(machine_state, srr0);
614 }
615
616 bool MinidumpGenerator::WriteContextPPC(breakpad_thread_state_data_t state,
617 MDLocationDescriptor *register_location)
618 {
619 TypedMDRVA<MDRawContextPPC> context(&writer_);
620 ppc_thread_state_t *machine_state =
621 reinterpret_cast<ppc_thread_state_t *>(state);
622
623 if (!context.Allocate())
624 return false;
625
626 *register_location = context.location();
627 MDRawContextPPC *context_ptr = context.get();
628 context_ptr->context_flags = MD_CONTEXT_PPC_BASE;
629
630 #define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
631 REGISTER_FROM_THREADSTATE(machine_state, a))
632 #define AddGPR(a) context_ptr->gpr[a] = \
633 static_cast<__typeof__(context_ptr->gpr[a])>( \
634 REGISTER_FROM_THREADSTATE(machine_state, r ## a))
635
636 AddReg(srr0);
637 AddReg(cr);
638 AddReg(xer);
639 AddReg(ctr);
640 AddReg(lr);
641 AddReg(vrsave);
642
643 AddGPR(0);
644 AddGPR(1);
645 AddGPR(2);
646 AddGPR(3);
647 AddGPR(4);
648 AddGPR(5);
649 AddGPR(6);
650 AddGPR(7);
651 AddGPR(8);
652 AddGPR(9);
653 AddGPR(10);
654 AddGPR(11);
655 AddGPR(12);
656 AddGPR(13);
657 AddGPR(14);
658 AddGPR(15);
659 AddGPR(16);
660 AddGPR(17);
661 AddGPR(18);
662 AddGPR(19);
663 AddGPR(20);
664 AddGPR(21);
665 AddGPR(22);
666 AddGPR(23);
667 AddGPR(24);
668 AddGPR(25);
669 AddGPR(26);
670 AddGPR(27);
671 AddGPR(28);
672 AddGPR(29);
673 AddGPR(30);
674 AddGPR(31);
675 AddReg(mq);
676 #undef AddReg
677 #undef AddGPR
678
679 return true;
680 }
681
682 bool MinidumpGenerator::WriteContextPPC64(
683 breakpad_thread_state_data_t state,
684 MDLocationDescriptor *register_location) {
685 TypedMDRVA<MDRawContextPPC64> context(&writer_);
686 ppc_thread_state64_t *machine_state =
687 reinterpret_cast<ppc_thread_state64_t *>(state);
688
689 if (!context.Allocate())
690 return false;
691
692 *register_location = context.location();
693 MDRawContextPPC64 *context_ptr = context.get();
694 context_ptr->context_flags = MD_CONTEXT_PPC_BASE;
695
696 #define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
697 REGISTER_FROM_THREADSTATE(machine_state, a))
698 #define AddGPR(a) context_ptr->gpr[a] = \
699 static_cast<__typeof__(context_ptr->gpr[a])>( \
700 REGISTER_FROM_THREADSTATE(machine_state, r ## a))
701
702 AddReg(srr0);
703 AddReg(cr);
704 AddReg(xer);
705 AddReg(ctr);
706 AddReg(lr);
707 AddReg(vrsave);
708
709 AddGPR(0);
710 AddGPR(1);
711 AddGPR(2);
712 AddGPR(3);
713 AddGPR(4);
714 AddGPR(5);
715 AddGPR(6);
716 AddGPR(7);
717 AddGPR(8);
718 AddGPR(9);
719 AddGPR(10);
720 AddGPR(11);
721 AddGPR(12);
722 AddGPR(13);
723 AddGPR(14);
724 AddGPR(15);
725 AddGPR(16);
726 AddGPR(17);
727 AddGPR(18);
728 AddGPR(19);
729 AddGPR(20);
730 AddGPR(21);
731 AddGPR(22);
732 AddGPR(23);
733 AddGPR(24);
734 AddGPR(25);
735 AddGPR(26);
736 AddGPR(27);
737 AddGPR(28);
738 AddGPR(29);
739 AddGPR(30);
740 AddGPR(31);
741 #undef AddReg
742 #undef AddGPR
743
744 return true;
745 }
746
747 #endif
748
749 #ifdef HAS_X86_SUPPORT
750 bool MinidumpGenerator::WriteStackX86(breakpad_thread_state_data_t state,
751 MDMemoryDescriptor *stack_location) {
752 i386_thread_state_t *machine_state =
753 reinterpret_cast<i386_thread_state_t *>(state);
754
755 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, esp);
756 return WriteStackFromStartAddress(start_addr, stack_location);
757 }
758
759 bool MinidumpGenerator::WriteStackX86_64(breakpad_thread_state_data_t state,
760 MDMemoryDescriptor *stack_location) {
761 x86_thread_state64_t *machine_state =
762 reinterpret_cast<x86_thread_state64_t *>(state);
763
764 mach_vm_address_t start_addr = static_cast<mach_vm_address_t>(
765 REGISTER_FROM_THREADSTATE(machine_state, rsp));
766 return WriteStackFromStartAddress(start_addr, stack_location);
767 }
768
769 uint64_t
770 MinidumpGenerator::CurrentPCForStackX86(breakpad_thread_state_data_t state) {
771 i386_thread_state_t *machine_state =
772 reinterpret_cast<i386_thread_state_t *>(state);
773
774 return REGISTER_FROM_THREADSTATE(machine_state, eip);
775 }
776
777 uint64_t
778 MinidumpGenerator::CurrentPCForStackX86_64(breakpad_thread_state_data_t state) {
779 x86_thread_state64_t *machine_state =
780 reinterpret_cast<x86_thread_state64_t *>(state);
781
782 return REGISTER_FROM_THREADSTATE(machine_state, rip);
783 }
784
785 bool MinidumpGenerator::WriteContextX86(breakpad_thread_state_data_t state,
786 MDLocationDescriptor *register_location)
787 {
788 TypedMDRVA<MDRawContextX86> context(&writer_);
789 i386_thread_state_t *machine_state =
790 reinterpret_cast<i386_thread_state_t *>(state);
791
792 if (!context.Allocate())
793 return false;
794
795 *register_location = context.location();
796 MDRawContextX86 *context_ptr = context.get();
797
798 #define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
799 REGISTER_FROM_THREADSTATE(machine_state, a))
800
801 context_ptr->context_flags = MD_CONTEXT_X86;
802 AddReg(eax);
803 AddReg(ebx);
804 AddReg(ecx);
805 AddReg(edx);
806 AddReg(esi);
807 AddReg(edi);
808 AddReg(ebp);
809 AddReg(esp);
810
811 AddReg(cs);
812 AddReg(ds);
813 AddReg(ss);
814 AddReg(es);
815 AddReg(fs);
816 AddReg(gs);
817 AddReg(eflags);
818
819 AddReg(eip);
820 #undef AddReg
821
822 return true;
823 }
824
825 bool MinidumpGenerator::WriteContextX86_64(
826 breakpad_thread_state_data_t state,
827 MDLocationDescriptor *register_location) {
828 TypedMDRVA<MDRawContextAMD64> context(&writer_);
829 x86_thread_state64_t *machine_state =
830 reinterpret_cast<x86_thread_state64_t *>(state);
831
832 if (!context.Allocate())
833 return false;
834
835 *register_location = context.location();
836 MDRawContextAMD64 *context_ptr = context.get();
837
838 #define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
839 REGISTER_FROM_THREADSTATE(machine_state, a))
840
841 context_ptr->context_flags = MD_CONTEXT_AMD64;
842 AddReg(rax);
843 AddReg(rbx);
844 AddReg(rcx);
845 AddReg(rdx);
846 AddReg(rdi);
847 AddReg(rsi);
848 AddReg(rbp);
849 AddReg(rsp);
850 AddReg(r8);
851 AddReg(r9);
852 AddReg(r10);
853 AddReg(r11);
854 AddReg(r12);
855 AddReg(r13);
856 AddReg(r14);
857 AddReg(r15);
858 AddReg(rip);
859 // according to AMD's software developer guide, bits above 18 are
860 // not used in the flags register. Since the minidump format
861 // specifies 32 bits for the flags register, we can truncate safely
862 // with no loss.
863 context_ptr->eflags = static_cast<uint32_t>(REGISTER_FROM_THREADSTATE(machine_state, rflags));
864 AddReg(cs);
865 AddReg(fs);
866 AddReg(gs);
867 #undef AddReg
868
869 return true;
870 }
871 #endif
872
873 bool MinidumpGenerator::GetThreadState(thread_act_t target_thread,
874 thread_state_t state,
875 mach_msg_type_number_t *count) {
876 if (task_context_ && target_thread == mach_thread_self()) {
877 switch (cpu_type_) {
878 #ifdef HAS_ARM_SUPPORT
879 case CPU_TYPE_ARM: {
880 size_t final_size =
881 std::min(static_cast<size_t>(*count), sizeof(arm_thread_state_t));
882 memcpy(state, &task_context_->breakpad_uc_mcontext->__ss, final_size);
883 *count = static_cast<mach_msg_type_number_t>(final_size);
884 return true;
      }
885 #endif
886 #ifdef HAS_ARM64_SUPPORT
887 case CPU_TYPE_ARM64: {
888 size_t final_size =
889 std::min(static_cast<size_t>(*count), sizeof(arm_thread_state64_t));
890 memcpy(state, &task_context_->breakpad_uc_mcontext->__ss, final_size);
891 *count = static_cast<mach_msg_type_number_t>(final_size);
892 return true;
893 }
894 #endif
895 #ifdef HAS_X86_SUPPORT
896 case CPU_TYPE_I386:
897 case CPU_TYPE_X86_64: {
898 size_t state_size = cpu_type_ == CPU_TYPE_I386 ?
899 sizeof(i386_thread_state_t) : sizeof(x86_thread_state64_t);
900 size_t final_size =
901 std::min(static_cast<size_t>(*count), state_size);
902 memcpy(state, &task_context_->breakpad_uc_mcontext->__ss, final_size);
903 *count = static_cast<mach_msg_type_number_t>(final_size);
904 return true;
905 }
906 #endif
907 }
908 }
909
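// No usable in-process context for this thread, so ask the kernel for the
// thread state directly, selecting the state flavor that matches cpu_type_.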
910 thread_state_flavor_t flavor;
911 switch (cpu_type_) {
912 #ifdef HAS_ARM_SUPPORT
913 case CPU_TYPE_ARM:
914 flavor = ARM_THREAD_STATE;
915 break;
916 #endif
917 #ifdef HAS_ARM64_SUPPORT
918 case CPU_TYPE_ARM64:
919 flavor = ARM_THREAD_STATE64;
920 break;
921 #endif
922 #ifdef HAS_PPC_SUPPORT
923 case CPU_TYPE_POWERPC:
924 flavor = PPC_THREAD_STATE;
925 break;
926 case CPU_TYPE_POWERPC64:
927 flavor = PPC_THREAD_STATE64;
928 break;
929 #endif
930 #ifdef HAS_X86_SUPPORT
931 case CPU_TYPE_I386:
932 flavor = i386_THREAD_STATE;
933 break;
934 case CPU_TYPE_X86_64:
935 flavor = x86_THREAD_STATE64;
936 break;
937 #endif
938 default:
939 return false;
940 }
941 return thread_get_state(target_thread, flavor,
942 state, count) == KERN_SUCCESS;
943 }
944
945 bool MinidumpGenerator::WriteThreadStream(mach_port_t thread_id,
946 MDRawThread *thread) {
947 breakpad_thread_state_data_t state;
948 mach_msg_type_number_t state_count
949 = static_cast<mach_msg_type_number_t>(sizeof(state));
950
951 if (GetThreadState(thread_id, state, &state_count)) {
952 if (!WriteStack(state, &thread->stack))
953 return false;
954
955 memory_blocks_.push_back(thread->stack);
956
957 if (!WriteContext(state, &thread->thread_context))
958 return false;
959
960 thread->thread_id = thread_id;
961 } else {
962 return false;
963 }
964
965 return true;
966 }
967
968 bool MinidumpGenerator::WriteThreadListStream(
969 MDRawDirectory *thread_list_stream) {
970 TypedMDRVA<MDRawThreadList> list(&writer_);
971 thread_act_port_array_t threads_for_task;
972 mach_msg_type_number_t thread_count;
973 int non_generator_thread_count;
974
975 if (task_threads(crashing_task_, &threads_for_task, &thread_count))
976 return false;
977
978 // Don't include the generator thread
979 if (handler_thread_ != MACH_PORT_NULL)
980 non_generator_thread_count = thread_count - 1;
981 else
982 non_generator_thread_count = thread_count;
983 if (!list.AllocateObjectAndArray(non_generator_thread_count,
984 sizeof(MDRawThread)))
985 return false;
986
987 thread_list_stream->stream_type = MD_THREAD_LIST_STREAM;
988 thread_list_stream->location = list.location();
989
990 list.get()->number_of_threads = non_generator_thread_count;
991
992 MDRawThread thread;
993 int thread_idx = 0;
994
995 for (unsigned int i = 0; i < thread_count; ++i) {
996 memset(&thread, 0, sizeof(MDRawThread));
997
998 if (threads_for_task[i] != handler_thread_) {
999 if (!WriteThreadStream(threads_for_task[i], &thread))
1000 return false;
1001
1002 list.CopyIndexAfterObject(thread_idx++, &thread, sizeof(MDRawThread));
1003 }
1004 }
1005
1006 return true;
1007 }
1008
1009 bool MinidumpGenerator::WriteMemoryListStream(
1010 MDRawDirectory *memory_list_stream) {
1011 TypedMDRVA<MDRawMemoryList> list(&writer_);
1012
1013 // If the dump has an exception, include some memory around the
1014 // instruction pointer.
1015 const size_t kIPMemorySize = 256; // bytes
1016 bool have_ip_memory = false;
1017 MDMemoryDescriptor ip_memory_d;
1018 if (exception_thread_ && exception_type_) {
1019 breakpad_thread_state_data_t state;
1020 mach_msg_type_number_t stateCount
1021 = static_cast<mach_msg_type_number_t>(sizeof(state));
1022
1023 if (GetThreadState(exception_thread_, state, &stateCount)) {
1024 uint64_t ip = CurrentPCForStack(state);
1025 // Bound it to the upper and lower bounds of the region
1026 // it's contained within. If it's not in a known memory region,
1027 // don't bother trying to write it.
1028 mach_vm_address_t addr = static_cast<vm_address_t>(ip);
1029 mach_vm_size_t size;
1030 natural_t nesting_level = 0;
1031 vm_region_submap_info_64 info;
1032 mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
1033 vm_region_recurse_info_t recurse_info;
1034 recurse_info = reinterpret_cast<vm_region_recurse_info_t>(&info);
1035
1036 kern_return_t ret =
1037 mach_vm_region_recurse(crashing_task_,
1038 &addr,
1039 &size,
1040 &nesting_level,
1041 recurse_info,
1042 &info_count);
1043 if (ret == KERN_SUCCESS && ip >= addr && ip < (addr + size)) {
1044 // Try to get 128 bytes before and after the IP, but
1045 // settle for whatever's available.
1046 ip_memory_d.start_of_memory_range =
1047 std::max(uintptr_t(addr),
1048 uintptr_t(ip - (kIPMemorySize / 2)));
1049 uintptr_t end_of_range =
1050 std::min(uintptr_t(ip + (kIPMemorySize / 2)),
1051 uintptr_t(addr + size));
1052 uintptr_t range_diff = end_of_range -
1053 static_cast<uintptr_t>(ip_memory_d.start_of_memory_range);
1054 ip_memory_d.memory.data_size = static_cast<uint32_t>(range_diff);
1055 have_ip_memory = true;
1056 // This needs to get appended to the list even though
1057 // the memory bytes aren't filled in yet so the entire
1058 // list can be written first. The memory bytes will get filled
1059 // in after the memory list is written.
1060 memory_blocks_.push_back(ip_memory_d);
1061 }
1062 }
1063 }
1064
1065 // Now fill in the memory list and write it.
1066 size_t memory_count = memory_blocks_.size();
1067 if (!list.AllocateObjectAndArray(memory_count,
1068 sizeof(MDMemoryDescriptor)))
1069 return false;
1070
1071 memory_list_stream->stream_type = MD_MEMORY_LIST_STREAM;
1072 memory_list_stream->location = list.location();
1073
1074 list.get()->number_of_memory_ranges = static_cast<uint32_t>(memory_count);
1075
1076 unsigned int i;
1077 for (i = 0; i < memory_count; ++i) {
1078 list.CopyIndexAfterObject(i, &memory_blocks_[i],
1079 sizeof(MDMemoryDescriptor));
1080 }
1081
1082 if (have_ip_memory) {
1083 // Now read the memory around the instruction pointer.
1084 UntypedMDRVA ip_memory(&writer_);
1085 if (!ip_memory.Allocate(ip_memory_d.memory.data_size))
1086 return false;
1087
1088 if (dynamic_images_) {
1089 // Out-of-process.
1090 vector<uint8_t> memory;
1091 if (ReadTaskMemory(crashing_task_,
1092 ip_memory_d.start_of_memory_range,
1093 ip_memory_d.memory.data_size,
1094 memory) != KERN_SUCCESS) {
1095 return false;
1096 }
1097
1098 ip_memory.Copy(&memory[0], ip_memory_d.memory.data_size);
1099 } else {
1100 // In-process, just copy from local memory.
1101 ip_memory.Copy(
1102 reinterpret_cast<const void *>(ip_memory_d.start_of_memory_range),
1103 ip_memory_d.memory.data_size);
1104 }
1105
1106 ip_memory_d.memory = ip_memory.location();
1107 // Write this again now that the data location is filled in.
1108 list.CopyIndexAfterObject(i - 1, &ip_memory_d,
1109 sizeof(MDMemoryDescriptor));
1110 }
1111
1112 return true;
1113 }
1114
1115 bool
1116 MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) {
1117 TypedMDRVA<MDRawExceptionStream> exception(&writer_);
1118
1119 if (!exception.Allocate())
1120 return false;
1121
1122 exception_stream->stream_type = MD_EXCEPTION_STREAM;
1123 exception_stream->location = exception.location();
1124 MDRawExceptionStream *exception_ptr = exception.get();
1125 exception_ptr->thread_id = exception_thread_;
1126
1127 // This naming is confusing, but it is the proper translation from
1128 // mach naming to minidump naming.
1129 exception_ptr->exception_record.exception_code = exception_type_;
1130 exception_ptr->exception_record.exception_flags = exception_code_;
1131
1132 breakpad_thread_state_data_t state;
1133 mach_msg_type_number_t state_count
1134 = static_cast<mach_msg_type_number_t>(sizeof(state));
1135
1136 if (!GetThreadState(exception_thread_, state, &state_count))
1137 return false;
1138
1139 if (!WriteContext(state, &exception_ptr->thread_context))
1140 return false;
1141
1142 if (exception_type_ == EXC_BAD_ACCESS)
1143 exception_ptr->exception_record.exception_address = exception_subcode_;
1144 else
1145 exception_ptr->exception_record.exception_address = CurrentPCForStack(state);
1146
1147 return true;
1148 }
1149
1150 bool MinidumpGenerator::WriteSystemInfoStream(
1151 MDRawDirectory *system_info_stream) {
1152 TypedMDRVA<MDRawSystemInfo> info(&writer_);
1153
1154 if (!info.Allocate())
1155 return false;
1156
1157 system_info_stream->stream_type = MD_SYSTEM_INFO_STREAM;
1158 system_info_stream->location = info.location();
1159
1160 // CPU Information
1161 uint32_t number_of_processors;
1162 size_t len = sizeof(number_of_processors);
1163 sysctlbyname("hw.ncpu", &number_of_processors, &len, NULL, 0);
1164 MDRawSystemInfo *info_ptr = info.get();
1165
1166 switch (cpu_type_) {
1167 #ifdef HAS_ARM_SUPPORT
1168 case CPU_TYPE_ARM:
1169 info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_ARM;
1170 break;
1171 #endif
1172 #ifdef HAS_ARM64_SUPPORT
1173 case CPU_TYPE_ARM64:
1174 info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_ARM64;
1175 break;
1176 #endif
1177 #ifdef HAS_PPC_SUPPORT
1178 case CPU_TYPE_POWERPC:
1179 case CPU_TYPE_POWERPC64:
1180 info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_PPC;
1181 break;
1182 #endif
1183 #ifdef HAS_X86_SUPPORT
1184 case CPU_TYPE_I386:
1185 case CPU_TYPE_X86_64:
1186 if (cpu_type_ == CPU_TYPE_I386)
1187 info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_X86;
1188 else
1189 info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_AMD64;
1190 #ifdef __i386__
1191 // ebx is used for PIC code, so we need
1192 // to preserve it.
1193 #define cpuid(op,eax,ebx,ecx,edx) \
1194 asm ("pushl %%ebx \n\t" \
1195 "cpuid \n\t" \
1196 "movl %%ebx,%1 \n\t" \
1197 "popl %%ebx" \
1198 : "=a" (eax), \
1199 "=g" (ebx), \
1200 "=c" (ecx), \
1201 "=d" (edx) \
1202 : "0" (op))
1203 #elif defined(__x86_64__)
1204
1205 #define cpuid(op,eax,ebx,ecx,edx) \
1206 asm ("cpuid \n\t" \
1207 : "=a" (eax), \
1208 "=b" (ebx), \
1209 "=c" (ecx), \
1210 "=d" (edx) \
1211 : "0" (op))
1212 #endif
1213
1214 #if defined(__i386__) || defined(__x86_64__)
1215 int unused, unused2;
1216 // get vendor id
1217 cpuid(0, unused, info_ptr->cpu.x86_cpu_info.vendor_id[0],
1218 info_ptr->cpu.x86_cpu_info.vendor_id[2],
1219 info_ptr->cpu.x86_cpu_info.vendor_id[1]);
1220 // get version and feature info
1221 cpuid(1, info_ptr->cpu.x86_cpu_info.version_information, unused, unused2,
1222 info_ptr->cpu.x86_cpu_info.feature_information);
1223
1224 // family
1225 info_ptr->processor_level =
1226 (info_ptr->cpu.x86_cpu_info.version_information & 0xF00) >> 8;
1227 // 0xMMSS (Model, Stepping)
1228 info_ptr->processor_revision = static_cast<uint16_t>(
1229 (info_ptr->cpu.x86_cpu_info.version_information & 0xF) |
1230 ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0) << 4));
1231
1232 // decode extended model info
1233 if (info_ptr->processor_level == 0xF ||
1234 info_ptr->processor_level == 0x6) {
1235 info_ptr->processor_revision |=
1236 ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0000) >> 4);
1237 }
1238
1239 // decode extended family info
1240 if (info_ptr->processor_level == 0xF) {
1241 info_ptr->processor_level +=
1242 ((info_ptr->cpu.x86_cpu_info.version_information & 0xFF00000) >> 20);
1243 }
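// Illustrative decode (hypothetical value): version_information 0x000306A9
// gives processor_level = 6 (family) and processor_revision = 0x3A09,
// i.e. model 0x3A (extended model 3, base model 0xA) and stepping 9.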
1244
1245 #endif // __i386__ || __x86_64__
1246 break;
1247 #endif // HAS_X86_SUPPORT
1248 default:
1249 info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_UNKNOWN;
1250 break;
1251 }
1252
1253 info_ptr->number_of_processors = static_cast<uint8_t>(number_of_processors);
1254 #if TARGET_OS_IPHONE
1255 info_ptr->platform_id = MD_OS_IOS;
1256 #else
1257 info_ptr->platform_id = MD_OS_MAC_OS_X;
1258 #endif // TARGET_OS_IPHONE
1259
1260 MDLocationDescriptor build_string_loc;
1261
1262 if (!writer_.WriteString(build_string_, 0,
1263 &build_string_loc))
1264 return false;
1265
1266 info_ptr->csd_version_rva = build_string_loc.rva;
1267 info_ptr->major_version = os_major_version_;
1268 info_ptr->minor_version = os_minor_version_;
1269 info_ptr->build_number = os_build_number_;
1270
1271 return true;
1272 }
1273
1274 bool MinidumpGenerator::WriteModuleStream(unsigned int index,
1275 MDRawModule *module) {
1276 if (dynamic_images_) {
1277 // we're in a different process than the crashed process
1278 DynamicImage *image = dynamic_images_->GetImage(index);
1279
1280 if (!image)
1281 return false;
1282
1283 memset(module, 0, sizeof(MDRawModule));
1284
1285 MDLocationDescriptor string_location;
1286
1287 string name = image->GetFilePath();
1288 if (!writer_.WriteString(name.c_str(), 0, &string_location))
1289 return false;
1290
1291 module->base_of_image = image->GetVMAddr() + image->GetVMAddrSlide();
1292 module->size_of_image = static_cast<uint32_t>(image->GetVMSize());
1293 module->module_name_rva = string_location.rva;
1294
1295 // We'll skip the executable module, because it doesn't have an
1296 // LC_ID_DYLIB load command, and the crash processing server gets
1297 // version information from the Plist file, anyway.
1298 if (index != static_cast<uint32_t>(FindExecutableModule())) {
1299 module->version_info.signature = MD_VSFIXEDFILEINFO_SIGNATURE;
1300 module->version_info.struct_version |= MD_VSFIXEDFILEINFO_VERSION;
1301 // Convert MAC dylib version format, which is a 32 bit number, to the
1302 // format used by minidump. The mac format is <16 bits>.<8 bits>.<8 bits>
1303 // so it fits nicely into the windows version with some massaging
1304 // The mapping is:
1305 // 1) upper 16 bits of MAC version go to lower 16 bits of product HI
1306 // 2) Next most significant 8 bits go to upper 16 bits of product LO
1307 // 3) Least significant 8 bits go to lower 16 bits of product LO
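// Worked example: a dylib version of 10.4.11 is encoded as 0x000A040B,
// which maps to file_version_hi = 0x000A and file_version_lo = 0x0004000B
// in the assignments below.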
1308 uint32_t modVersion = image->GetVersion();
1309 module->version_info.file_version_hi = 0;
1310 module->version_info.file_version_hi = modVersion >> 16;
1311 module->version_info.file_version_lo |= (modVersion & 0xff00) << 8;
1312 module->version_info.file_version_lo |= (modVersion & 0xff);
1313 }
1314
1315 if (!WriteCVRecord(module, image->GetCPUType(), name.c_str(), false)) {
1316 return false;
1317 }
1318 } else {
1319 // Getting module info in the crashed process
1320 const breakpad_mach_header *header;
1321 header = (breakpad_mach_header*)_dyld_get_image_header(index);
1322 if (!header)
1323 return false;
1324
1325 #ifdef __LP64__
1326 assert(header->magic == MH_MAGIC_64);
1327
1328 if(header->magic != MH_MAGIC_64)
1329 return false;
1330 #else
1331 assert(header->magic == MH_MAGIC);
1332
1333 if(header->magic != MH_MAGIC)
1334 return false;
1335 #endif
1336
1337 int cpu_type = header->cputype;
1338 unsigned long slide = _dyld_get_image_vmaddr_slide(index);
1339 const char* name = _dyld_get_image_name(index);
1340 const struct load_command *cmd =
1341 reinterpret_cast<const struct load_command *>(header + 1);
1342
1343 memset(module, 0, sizeof(MDRawModule));
1344
1345 for (unsigned int i = 0; cmd && (i < header->ncmds); i++) {
1346 if (cmd->cmd == LC_SEGMENT_ARCH) {
1347
1348 const breakpad_mach_segment_command *seg =
1349 reinterpret_cast<const breakpad_mach_segment_command *>(cmd);
1350
1351 if (!strcmp(seg->segname, "__TEXT")) {
1352 MDLocationDescriptor string_location;
1353
1354 if (!writer_.WriteString(name, 0, &string_location))
1355 return false;
1356
1357 module->base_of_image = seg->vmaddr + slide;
1358 module->size_of_image = static_cast<uint32_t>(seg->vmsize);
1359 module->module_name_rva = string_location.rva;
1360
1361 bool in_memory = false;
1362 #if TARGET_OS_IPHONE
1363 in_memory = true;
1364 #endif
1365 if (!WriteCVRecord(module, cpu_type, name, in_memory))
1366 return false;
1367
1368 return true;
1369 }
1370 }
1371
1372 cmd = reinterpret_cast<struct load_command*>((char *)cmd + cmd->cmdsize);
1373 }
1374 }
1375
1376 return true;
1377 }
1378
1379 int MinidumpGenerator::FindExecutableModule() {
1380 if (dynamic_images_) {
1381 int index = dynamic_images_->GetExecutableImageIndex();
1382
1383 if (index >= 0) {
1384 return index;
1385 }
1386 } else {
1387 int image_count = _dyld_image_count();
1388 const struct mach_header *header;
1389
1390 for (int index = 0; index < image_count; ++index) {
1391 header = _dyld_get_image_header(index);
1392
1393 if (header->filetype == MH_EXECUTE)
1394 return index;
1395 }
1396 }
1397
1398 // failed - just use the first image
1399 return 0;
1400 }
1401
1402 bool MinidumpGenerator::WriteCVRecord(MDRawModule *module, int cpu_type,
1403 const char *module_path, bool in_memory) {
1404 TypedMDRVA<MDCVInfoPDB70> cv(&writer_);
1405
1406 // Only return the last path component of the full module path
1407 const char *module_name = strrchr(module_path, '/');
1408
1409 // Increment past the slash
1410 if (module_name)
1411 ++module_name;
1412 else
1413 module_name = "<Unknown>";
1414
1415 size_t module_name_length = strlen(module_name);
1416
1417 if (!cv.AllocateObjectAndArray(module_name_length + 1, sizeof(uint8_t)))
1418 return false;
1419
1420 if (!cv.CopyIndexAfterObject(0, module_name, module_name_length))
1421 return false;
1422
1423 module->cv_record = cv.location();
1424 MDCVInfoPDB70 *cv_ptr = cv.get();
1425 cv_ptr->cv_signature = MD_CVINFOPDB70_SIGNATURE;
1426 cv_ptr->age = 0;
1427
1428 // Get the module identifier
1429 unsigned char identifier[16];
1430 bool result = false;
1431 if (in_memory) {
1432 MacFileUtilities::MachoID macho(module_path,
1433 reinterpret_cast<void *>(module->base_of_image),
1434 static_cast<size_t>(module->size_of_image));
1435 result = macho.UUIDCommand(cpu_type, CPU_SUBTYPE_MULTIPLE, identifier);
1436 if (!result)
1437 result = macho.MD5(cpu_type, CPU_SUBTYPE_MULTIPLE, identifier);
1438 }
1439
1440 if (!result) {
1441 FileID file_id(module_path);
1442 result = file_id.MachoIdentifier(cpu_type, CPU_SUBTYPE_MULTIPLE,
1443 identifier);
1444 }
1445
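// Pack the 16 identifier bytes into the GUID fields below in big-endian
// order: data1 from bytes 0-3, data2 from bytes 4-5, data3 from bytes 6-7,
// and data4 from the remaining eight bytes.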
1446 if (result) {
1447 cv_ptr->signature.data1 =
1448 static_cast<uint32_t>(identifier[0]) << 24 |
1449 static_cast<uint32_t>(identifier[1]) << 16 |
1450 static_cast<uint32_t>(identifier[2]) << 8 |
1451 static_cast<uint32_t>(identifier[3]);
1452 cv_ptr->signature.data2 =
1453 static_cast<uint16_t>(identifier[4] << 8) | identifier[5];
1454 cv_ptr->signature.data3 =
1455 static_cast<uint16_t>(identifier[6] << 8) | identifier[7];
1456 cv_ptr->signature.data4[0] = identifier[8];
1457 cv_ptr->signature.data4[1] = identifier[9];
1458 cv_ptr->signature.data4[2] = identifier[10];
1459 cv_ptr->signature.data4[3] = identifier[11];
1460 cv_ptr->signature.data4[4] = identifier[12];
1461 cv_ptr->signature.data4[5] = identifier[13];
1462 cv_ptr->signature.data4[6] = identifier[14];
1463 cv_ptr->signature.data4[7] = identifier[15];
1464 }
1465
1466 return true;
1467 }
1468
1469 bool MinidumpGenerator::WriteModuleListStream(
1470 MDRawDirectory *module_list_stream) {
1471 TypedMDRVA<MDRawModuleList> list(&writer_);
1472
1473 uint32_t image_count = dynamic_images_ ?
1474 dynamic_images_->GetImageCount() :
1475 _dyld_image_count();
1476
1477 if (!list.AllocateObjectAndArray(image_count, MD_MODULE_SIZE))
1478 return false;
1479
1480 module_list_stream->stream_type = MD_MODULE_LIST_STREAM;
1481 module_list_stream->location = list.location();
1482 list.get()->number_of_modules = static_cast<uint32_t>(image_count);
1483
1484 // Write out the executable module as the first one
1485 MDRawModule module;
1486 uint32_t executableIndex = FindExecutableModule();
1487
1488 if (!WriteModuleStream(static_cast<unsigned>(executableIndex), &module)) {
1489 return false;
1490 }
1491
1492 list.CopyIndexAfterObject(0, &module, MD_MODULE_SIZE);
1493 int destinationIndex = 1; // Write all other modules after this one
1494
1495 for (uint32_t i = 0; i < image_count; ++i) {
1496 if (i != executableIndex) {
1497 if (!WriteModuleStream(static_cast<unsigned>(i), &module)) {
1498 return false;
1499 }
1500
1501 list.CopyIndexAfterObject(destinationIndex++, &module, MD_MODULE_SIZE);
1502 }
1503 }
1504
1505 return true;
1506 }
1507
1508 bool MinidumpGenerator::WriteMiscInfoStream(MDRawDirectory *misc_info_stream) {
1509 TypedMDRVA<MDRawMiscInfo> info(&writer_);
1510
1511 if (!info.Allocate())
1512 return false;
1513
1514 misc_info_stream->stream_type = MD_MISC_INFO_STREAM;
1515 misc_info_stream->location = info.location();
1516
1517 MDRawMiscInfo *info_ptr = info.get();
1518 info_ptr->size_of_info = static_cast<uint32_t>(sizeof(MDRawMiscInfo));
1519 info_ptr->flags1 = MD_MISCINFO_FLAGS1_PROCESS_ID |
1520 MD_MISCINFO_FLAGS1_PROCESS_TIMES |
1521 MD_MISCINFO_FLAGS1_PROCESSOR_POWER_INFO;
1522
1523 // Process ID
1524 info_ptr->process_id = getpid();
1525
1526 // Times
1527 struct rusage usage;
1528 if (getrusage(RUSAGE_SELF, &usage) != -1) {
1529 // Omit the fractional time since the MDRawMiscInfo only wants seconds
1530 info_ptr->process_user_time =
1531 static_cast<uint32_t>(usage.ru_utime.tv_sec);
1532 info_ptr->process_kernel_time =
1533 static_cast<uint32_t>(usage.ru_stime.tv_sec);
1534 }
1535 int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID,
1536 static_cast<int>(info_ptr->process_id) };
1537 uint mibsize = static_cast<uint>(sizeof(mib) / sizeof(mib[0]));
1538 struct kinfo_proc proc;
1539 size_t size = sizeof(proc);
1540 if (sysctl(mib, mibsize, &proc, &size, NULL, 0) == 0) {
1541 info_ptr->process_create_time =
1542 static_cast<uint32_t>(proc.kp_proc.p_starttime.tv_sec);
1543 }
1544
1545 // Speed
1546 uint64_t speed;
1547 const uint64_t kOneMillion = 1000 * 1000;
1548 size = sizeof(speed);
1549 sysctlbyname("hw.cpufrequency_max", &speed, &size, NULL, 0);
1550 info_ptr->processor_max_mhz = static_cast<uint32_t>(speed / kOneMillion);
1551 info_ptr->processor_mhz_limit = static_cast<uint32_t>(speed / kOneMillion);
1552 size = sizeof(speed);
1553 sysctlbyname("hw.cpufrequency", &speed, &size, NULL, 0);
1554 info_ptr->processor_current_mhz = static_cast<uint32_t>(speed / kOneMillion);
1555
1556 return true;
1557 }
1558
1559 bool MinidumpGenerator::WriteBreakpadInfoStream(
1560 MDRawDirectory *breakpad_info_stream) {
1561 TypedMDRVA<MDRawBreakpadInfo> info(&writer_);
1562
1563 if (!info.Allocate())
1564 return false;
1565
1566 breakpad_info_stream->stream_type = MD_BREAKPAD_INFO_STREAM;
1567 breakpad_info_stream->location = info.location();
1568 MDRawBreakpadInfo *info_ptr = info.get();
1569
1570 if (exception_thread_ && exception_type_) {
1571 info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID |
1572 MD_BREAKPAD_INFO_VALID_REQUESTING_THREAD_ID;
1573 info_ptr->dump_thread_id = handler_thread_;
1574 info_ptr->requesting_thread_id = exception_thread_;
1575 } else {
1576 info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID;
1577 info_ptr->dump_thread_id = handler_thread_;
1578 info_ptr->requesting_thread_id = 0;
1579 }
1580
1581 return true;
1582 }
1583
1584 } // namespace google_breakpad
1585