/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "DEBUG"

#include "libdebuggerd/tombstone.h"
#include "libdebuggerd/gwp_asan.h"
#if defined(USE_SCUDO)
#include "libdebuggerd/scudo.h"
#endif

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <signal.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <time.h>

#include <memory>
#include <optional>
#include <set>
#include <string>

#include <async_safe/log.h>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>

#include <android/log.h>
#include <bionic/macros.h>
#include <bionic/reserved_signals.h>
#include <log/log.h>
#include <log/log_read.h>
#include <log/logprint.h>
#include <private/android_filesystem_config.h>

#include <procinfo/process.h>
#include <unwindstack/Maps.h>
#include <unwindstack/Memory.h>
#include <unwindstack/Regs.h>
#include <unwindstack/Unwinder.h>

#include "libdebuggerd/open_files_list.h"
#include "libdebuggerd/utility.h"
#include "util.h"

#include "tombstone.pb.h"

using android::base::StringPrintf;

// Use the demangler from libc++.
extern "C" char* __cxa_demangle(const char*, char*, size_t*, int* status);

static Architecture get_arch() {
#if defined(__arm__)
  return Architecture::ARM32;
#elif defined(__aarch64__)
  return Architecture::ARM64;
#elif defined(__i386__)
  return Architecture::X86;
#elif defined(__x86_64__)
  return Architecture::X86_64;
#else
#error Unknown architecture!
#endif
}

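// Heuristic stack-overflow detection: if the fault address lies within
// kMaxDifferenceBytes of the stack pointer, inspect the map containing sp.
// An sp in a missing map, in a non-rw (guard) map, or within a few hundred
// bytes of its map's start is reported as a likely stack overflow.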
static std::optional<std::string> get_stack_overflow_cause(uint64_t fault_addr, uint64_t sp,
                                                           unwindstack::Maps* maps) {
  static constexpr uint64_t kMaxDifferenceBytes = 256;
  uint64_t difference;
  if (sp >= fault_addr) {
    difference = sp - fault_addr;
  } else {
    difference = fault_addr - sp;
  }
  if (difference <= kMaxDifferenceBytes) {
    // The faulting address is close to the current sp, check if the sp
    // indicates a stack overflow.
    // On arm, the sp does not get updated when the instruction faults.
    // In this case, the sp will still be in a valid map, which is the
    // last case below.
    // On aarch64, the sp does get updated when the instruction faults.
    // In this case, the sp will be in either an invalid map if triggered
    // on the main thread, or in a guard map if in another thread, which
    // will be the first case or second case from below.
    std::shared_ptr<unwindstack::MapInfo> map_info = maps->Find(sp);
    if (map_info == nullptr) {
      return "stack pointer is in a non-existent map; likely due to stack overflow.";
    } else if ((map_info->flags() & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)) {
      return "stack pointer is not in a rw map; likely due to stack overflow.";
    } else if ((sp - map_info->start()) <= kMaxDifferenceBytes) {
      return "stack pointer is close to top of stack; likely stack overflow.";
    }
  }
  return {};
}

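// Builds the human-readable description for a heap memory error reported by a
// memory-safety tool, e.g.
// "[GWP-ASan]: Use After Free, 40 bytes into a 41-byte allocation at 0xdeadbeef".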
void set_human_readable_cause(Cause* cause, uint64_t fault_addr) {
  if (!cause->has_memory_error() || !cause->memory_error().has_heap()) {
    return;
  }

  const MemoryError& memory_error = cause->memory_error();
  const HeapObject& heap_object = memory_error.heap();

  const char* tool_str;
  switch (memory_error.tool()) {
    case MemoryError_Tool_GWP_ASAN:
      tool_str = "GWP-ASan";
      break;
    case MemoryError_Tool_SCUDO:
      tool_str = "MTE";
      break;
    default:
      tool_str = "Unknown";
      break;
  }

  const char* error_type_str;
  switch (memory_error.type()) {
    case MemoryError_Type_USE_AFTER_FREE:
      error_type_str = "Use After Free";
      break;
    case MemoryError_Type_DOUBLE_FREE:
      error_type_str = "Double Free";
      break;
    case MemoryError_Type_INVALID_FREE:
      error_type_str = "Invalid (Wild) Free";
      break;
    case MemoryError_Type_BUFFER_OVERFLOW:
      error_type_str = "Buffer Overflow";
      break;
    case MemoryError_Type_BUFFER_UNDERFLOW:
      error_type_str = "Buffer Underflow";
      break;
    default:
      cause->set_human_readable(
          StringPrintf("[%s]: Unknown error occurred at 0x%" PRIx64 ".", tool_str, fault_addr));
      return;
  }

  uint64_t diff;
  const char* location_str;

  if (fault_addr < heap_object.address()) {
    // Buffer Underflow, 6 bytes left of a 41-byte allocation at 0xdeadbeef.
    location_str = "left of";
    diff = heap_object.address() - fault_addr;
  } else if (fault_addr - heap_object.address() < heap_object.size()) {
    // Use After Free, 40 bytes into a 41-byte allocation at 0xdeadbeef.
    location_str = "into";
    diff = fault_addr - heap_object.address();
  } else {
    // Buffer Overflow, 6 bytes right of a 41-byte allocation at 0xdeadbeef.
    location_str = "right of";
    diff = fault_addr - heap_object.address() - heap_object.size();
  }

  // Suffix of 'bytes', i.e. '4 bytes' vs. '1 byte'.
  const char* byte_suffix = "s";
  if (diff == 1) {
    byte_suffix = "";
  }

  cause->set_human_readable(StringPrintf(
      "[%s]: %s, %" PRIu64 " byte%s %s a %" PRIu64 "-byte allocation at 0x%" PRIx64, tool_str,
      error_type_str, diff, byte_suffix, location_str, heap_object.size(), heap_object.address()));
}

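// Adds a Cause proto describing the most probable reason for the crash:
// defers to scudo (when built with USE_SCUDO) and GWP-ASan if one of them
// claims the fault, otherwise classifies SEGV faults (null dereference,
// legacy arm kuser helpers, stack overflow, execute-only access) and
// seccomp-blocked syscalls (SIGSYS).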
static void dump_probable_cause(Tombstone* tombstone, unwindstack::Unwinder* unwinder,
                                const ProcessInfo& process_info, const ThreadInfo& main_thread) {
#if defined(USE_SCUDO)
  ScudoCrashData scudo_crash_data(unwinder->GetProcessMemory().get(), process_info);
  if (scudo_crash_data.CrashIsMine()) {
    scudo_crash_data.AddCauseProtos(tombstone, unwinder);
    return;
  }
#endif

  GwpAsanCrashData gwp_asan_crash_data(unwinder->GetProcessMemory().get(), process_info,
                                       main_thread);
  if (gwp_asan_crash_data.CrashIsMine()) {
    gwp_asan_crash_data.AddCauseProtos(tombstone, unwinder);
    return;
  }

  const siginfo* si = main_thread.siginfo;
  auto fault_addr = reinterpret_cast<uint64_t>(si->si_addr);
  unwindstack::Maps* maps = unwinder->GetMaps();

  std::optional<std::string> cause;
  if (si->si_signo == SIGSEGV && si->si_code == SEGV_MAPERR) {
    if (fault_addr < 4096) {
      cause = "null pointer dereference";
    } else if (fault_addr == 0xffff0ffc) {
      cause = "call to kuser_helper_version";
    } else if (fault_addr == 0xffff0fe0) {
      cause = "call to kuser_get_tls";
    } else if (fault_addr == 0xffff0fc0) {
      cause = "call to kuser_cmpxchg";
    } else if (fault_addr == 0xffff0fa0) {
      cause = "call to kuser_memory_barrier";
    } else if (fault_addr == 0xffff0f60) {
      cause = "call to kuser_cmpxchg64";
    } else {
      cause = get_stack_overflow_cause(fault_addr, main_thread.registers->sp(), maps);
    }
  } else if (si->si_signo == SIGSEGV && si->si_code == SEGV_ACCERR) {
    auto map_info = maps->Find(fault_addr);
    if (map_info != nullptr && map_info->flags() == PROT_EXEC) {
      cause = "execute-only (no-read) memory access error; likely due to data in .text.";
    } else {
      cause = get_stack_overflow_cause(fault_addr, main_thread.registers->sp(), maps);
    }
  } else if (si->si_signo == SIGSYS && si->si_code == SYS_SECCOMP) {
    cause = StringPrintf("seccomp prevented call to disallowed %s system call %d", ABI_STRING,
                         si->si_syscall);
  }

  if (cause) {
    Cause* cause_proto = tombstone->add_causes();
    cause_proto->set_human_readable(*cause);
  }
}

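// Reads the abort message (a length-prefixed buffer in the crashing process,
// typically set via android_set_abort_message()) out of the target's memory
// and records it in the tombstone, with trailing NULs and newlines stripped.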
static void dump_abort_message(Tombstone* tombstone, unwindstack::Unwinder* unwinder,
                               const ProcessInfo& process_info) {
  std::shared_ptr<unwindstack::Memory> process_memory = unwinder->GetProcessMemory();
  uintptr_t address = process_info.abort_msg_address;
  if (address == 0) {
    return;
  }

  size_t length;
  if (!process_memory->ReadFully(address, &length, sizeof(length))) {
    async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG, "failed to read abort message header: %s",
                          strerror(errno));
    return;
  }

  // The length field includes the length of the length field itself.
  if (length < sizeof(size_t)) {
    async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG,
                          "abort message header malformed: claimed length = %zu", length);
    return;
  }

  length -= sizeof(size_t);

  // The abort message should be null terminated already, but reserve a spot for NUL just in case.
  std::string msg;
  msg.resize(length);

  if (!process_memory->ReadFully(address + sizeof(length), &msg[0], length)) {
    async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG, "failed to read abort message: %s",
                          strerror(errno));
    return;
  }

  // Remove any trailing newlines.
  size_t index = msg.size();
  while (index > 0 && (msg[index - 1] == '\0' || msg[index - 1] == '\n')) {
    --index;
  }
  msg.resize(index);

  tombstone->set_abort_message(msg);
}

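// Records the open file descriptors of the crashing process, including the
// fdsan owner type and tag value for fds that have an owner.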
static void dump_open_fds(Tombstone* tombstone, const OpenFilesList* open_files) {
  if (open_files) {
    for (auto& [fd, entry] : *open_files) {
      FD f;

      f.set_fd(fd);

      const std::optional<std::string>& path = entry.path;
      if (path) {
        f.set_path(*path);
      }

      const std::optional<uint64_t>& fdsan_owner = entry.fdsan_owner;
      if (fdsan_owner) {
        const char* type = android_fdsan_get_tag_type(*fdsan_owner);
        uint64_t value = android_fdsan_get_tag_value(*fdsan_owner);
        f.set_owner(type);
        f.set_tag(value);
      }

      *tombstone->add_open_fds() = f;
    }
  }
}

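// Converts one unwindstack frame into a BacktraceFrame proto: pc/sp, the
// demangled function name and offset, and the backing mapping (file name,
// file offset, build id), or "<unknown>" when the frame has no map.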
void fill_in_backtrace_frame(BacktraceFrame* f, const unwindstack::FrameData& frame) {
  f->set_rel_pc(frame.rel_pc);
  f->set_pc(frame.pc);
  f->set_sp(frame.sp);

  if (!frame.function_name.empty()) {
    // TODO: Should this happen here, or on the display side?
    char* demangled_name = __cxa_demangle(frame.function_name.c_str(), nullptr, nullptr, nullptr);
    if (demangled_name) {
      f->set_function_name(demangled_name);
      free(demangled_name);
    } else {
      f->set_function_name(frame.function_name);
    }
  }

  f->set_function_offset(frame.function_offset);

  if (frame.map_info == nullptr) {
    // No valid map associated with this frame.
    f->set_file_name("<unknown>");
    return;
  }

  if (!frame.map_info->name().empty()) {
    f->set_file_name(frame.map_info->GetFullName());
  } else {
    f->set_file_name(StringPrintf("<anonymous:%" PRIx64 ">", frame.map_info->start()));
  }
  f->set_file_map_offset(frame.map_info->elf_start_offset());

  f->set_build_id(frame.map_info->GetPrintableBuildID());
}

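// Adds the thread's register values to the proto. When memory_dump is set
// (the crashing thread), also dumps the bytes around each register value
// that points at readable memory, plus MTE tags on aarch64 when any are set.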
static void dump_registers(unwindstack::Unwinder* unwinder,
                           const std::unique_ptr<unwindstack::Regs>& regs, Thread& thread,
                           bool memory_dump) {
  if (regs == nullptr) {
    return;
  }

  unwindstack::Maps* maps = unwinder->GetMaps();
  unwindstack::Memory* memory = unwinder->GetProcessMemory().get();

  regs->IterateRegisters([&thread, memory_dump, maps, memory](const char* name, uint64_t value) {
    Register r;
    r.set_name(name);
    r.set_u64(value);
    *thread.add_registers() = r;

    if (memory_dump) {
      MemoryDump dump;

      dump.set_register_name(name);
      std::shared_ptr<unwindstack::MapInfo> map_info = maps->Find(untag_address(value));
      if (map_info) {
        dump.set_mapping_name(map_info->name());
      }

      constexpr size_t kNumBytesAroundRegister = 256;
      constexpr size_t kNumTagsAroundRegister = kNumBytesAroundRegister / kTagGranuleSize;
      char buf[kNumBytesAroundRegister];
      uint8_t tags[kNumTagsAroundRegister];
      ssize_t bytes = dump_memory(buf, sizeof(buf), tags, sizeof(tags), &value, memory);
      if (bytes == -1) {
        return;
      }
      dump.set_begin_address(value);
      dump.set_memory(buf, bytes);

      bool has_tags = false;
#if defined(__aarch64__)
      for (size_t i = 0; i < kNumTagsAroundRegister; ++i) {
        if (tags[i] != 0) {
          has_tags = true;
        }
      }
#endif  // defined(__aarch64__)

      if (has_tags) {
        dump.mutable_arm_mte_metadata()->set_memory_tags(tags, kNumTagsAroundRegister);
      }

      *thread.add_memory_dump() = std::move(dump);
    }
  });
}

static void log_unwinder_error(unwindstack::Unwinder* unwinder) {
  if (unwinder->LastErrorCode() == unwindstack::ERROR_NONE) {
    return;
  }

  async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG, " error code: %s",
                        unwinder->LastErrorCodeString());
  async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG, " error address: 0x%" PRIx64,
                        unwinder->LastErrorAddress());
}

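// Copies the unwound frames into the thread proto and, if any frames came
// from unreadable ELF files, records those file names along with a note
// explaining why function names and build ids may be missing.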
static void dump_thread_backtrace(unwindstack::Unwinder* unwinder, Thread& thread) {
  if (unwinder->NumFrames() == 0) {
    async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG, "failed to unwind");
    log_unwinder_error(unwinder);
    return;
  }

  unwinder->SetDisplayBuildID(true);
  std::set<std::string> unreadable_elf_files;
  for (const auto& frame : unwinder->frames()) {
    BacktraceFrame* f = thread.add_current_backtrace();
    fill_in_backtrace_frame(f, frame);
    if (frame.map_info != nullptr && frame.map_info->ElfFileNotReadable()) {
      unreadable_elf_files.emplace(frame.map_info->name());
    }
  }

  if (!unreadable_elf_files.empty()) {
    auto unreadable_elf_files_proto = thread.mutable_unreadable_elf_files();
    auto backtrace_note = thread.mutable_backtrace_note();
    *backtrace_note->Add() =
        "Function names and BuildId information is missing for some frames due";
    *backtrace_note->Add() = "to unreadable libraries. For unwinds of apps, only shared libraries";
    *backtrace_note->Add() = "found under the lib/ directory are readable.";
    *backtrace_note->Add() = "On this device, run setenforce 0 to make the libraries readable.";
    *backtrace_note->Add() = "Unreadable libraries:";
    for (auto& name : unreadable_elf_files) {
      *backtrace_note->Add() = " " + name;
      *unreadable_elf_files_proto->Add() = name;
    }
  }
}

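// Dumps one thread into the tombstone. If no registers were captured for the
// thread, falls back to a live unwind of the running process via
// ThreadUnwinder and BIONIC_SIGNAL_BACKTRACE; otherwise unwinds from a copy
// of the saved registers. memory_dump controls whether register-adjacent
// memory is saved alongside the register values.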
static void dump_thread(Tombstone* tombstone, unwindstack::Unwinder* unwinder,
                        const ThreadInfo& thread_info, bool memory_dump = false) {
  Thread thread;

  thread.set_id(thread_info.tid);
  thread.set_name(thread_info.thread_name);
  thread.set_tagged_addr_ctrl(thread_info.tagged_addr_ctrl);
  thread.set_pac_enabled_keys(thread_info.pac_enabled_keys);

  if (thread_info.registers == nullptr) {
    // Fallback path for non-main thread, doing unwind from running process.
    unwindstack::ThreadUnwinder thread_unwinder(kMaxFrames, unwinder->GetMaps());
    if (!thread_unwinder.Init()) {
      async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG,
                            "Unable to initialize ThreadUnwinder object.");
      log_unwinder_error(&thread_unwinder);
      return;
    }

    std::unique_ptr<unwindstack::Regs> initial_regs;
    thread_unwinder.UnwindWithSignal(BIONIC_SIGNAL_BACKTRACE, thread_info.tid, &initial_regs);
    dump_registers(&thread_unwinder, initial_regs, thread, memory_dump);
    dump_thread_backtrace(&thread_unwinder, thread);
  } else {
    dump_registers(unwinder, thread_info.registers, thread, memory_dump);
    std::unique_ptr<unwindstack::Regs> regs_copy(thread_info.registers->Clone());
    unwinder->SetRegs(regs_copy.get());
    unwinder->Unwind();
    dump_thread_backtrace(unwinder, thread);
  }

  auto& threads = *tombstone->mutable_threads();
  threads[thread_info.tid] = thread;
}

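// Records every memory mapping of the process: address range, file offset,
// rwx protection flags, mapping name, build id (when present), and load bias.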
static void dump_mappings(Tombstone* tombstone, unwindstack::Unwinder* unwinder) {
  unwindstack::Maps* maps = unwinder->GetMaps();
  std::shared_ptr<unwindstack::Memory> process_memory = unwinder->GetProcessMemory();

  for (const auto& map_info : *maps) {
    auto* map = tombstone->add_memory_mappings();
    map->set_begin_address(map_info->start());
    map->set_end_address(map_info->end());
    map->set_offset(map_info->offset());

    if (map_info->flags() & PROT_READ) {
      map->set_read(true);
    }
    if (map_info->flags() & PROT_WRITE) {
      map->set_write(true);
    }
    if (map_info->flags() & PROT_EXEC) {
      map->set_execute(true);
    }

    map->set_mapping_name(map_info->name());

    std::string build_id = map_info->GetPrintableBuildID();
    if (!build_id.empty()) {
      map->set_build_id(build_id);
    }

    map->set_load_bias(map_info->GetLoadBias(process_memory));
  }
}

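// Drains the given log buffer (non-blocking) for the crashing pid and copies
// each message into the tombstone as timestamped LogMessage entries, splitting
// multi-line messages onto separate entries the way logcat does.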
static void dump_log_file(Tombstone* tombstone, const char* logger, pid_t pid) {
  logger_list* logger_list =
      android_logger_list_open(android_name_to_log_id(logger), ANDROID_LOG_NONBLOCK, 0, pid);

  LogBuffer buffer;

  while (true) {
    log_msg log_entry;
    ssize_t actual = android_logger_list_read(logger_list, &log_entry);

    if (actual < 0) {
      if (actual == -EINTR) {
        // interrupted by signal, retry
        continue;
      }
      // -EAGAIN is the non-blocking EOF; treat any other error as EOF too.
      break;
    } else if (actual == 0) {
      break;
    }

    char timestamp_secs[32];
    time_t sec = static_cast<time_t>(log_entry.entry.sec);
    tm tm;
    localtime_r(&sec, &tm);
    strftime(timestamp_secs, sizeof(timestamp_secs), "%m-%d %H:%M:%S", &tm);
    std::string timestamp =
        StringPrintf("%s.%03d", timestamp_secs, log_entry.entry.nsec / 1'000'000);

    // Msg format is: <priority:1><tag:N>\0<message:N>\0
    char* msg = log_entry.msg();
    if (msg == nullptr) {
      continue;
    }

    unsigned char prio = msg[0];
    char* tag = msg + 1;
    msg = tag + strlen(tag) + 1;

    // consume any trailing newlines
    char* nl = msg + strlen(msg) - 1;
    while (nl >= msg && *nl == '\n') {
      *nl-- = '\0';
    }

    // Look for line breaks ('\n') and display each text line
    // on a separate line, prefixed with the header, like logcat does.
    do {
      nl = strchr(msg, '\n');
      if (nl != nullptr) {
        *nl = '\0';
        ++nl;
      }

      LogMessage* log_msg = buffer.add_logs();
      log_msg->set_timestamp(timestamp);
      log_msg->set_pid(log_entry.entry.pid);
      log_msg->set_tid(log_entry.entry.tid);
      log_msg->set_priority(prio);
      log_msg->set_tag(tag);
      log_msg->set_message(msg);
    } while ((msg = nl));
  }
  android_logger_list_free(logger_list);

  if (!buffer.logs().empty()) {
    buffer.set_name(logger);
    *tombstone->add_log_buffers() = std::move(buffer);
  }
}

static void dump_logcat(Tombstone* tombstone, pid_t pid) {
  dump_log_file(tombstone, "system", pid);
  dump_log_file(tombstone, "main", pid);
}

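// On arm64, reads the MTE memory tags in a window around the fault address
// (skipping leading pages that are unmapped or not PROT_MTE) and attaches
// them to the Signal proto so a tag dump can be rendered next to the fault.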
static void dump_tags_around_fault_addr(Signal* signal, const Tombstone& tombstone,
                                        unwindstack::Unwinder* unwinder, uintptr_t fault_addr) {
  if (tombstone.arch() != Architecture::ARM64) return;

  fault_addr = untag_address(fault_addr);
  constexpr size_t kNumGranules = kNumTagRows * kNumTagColumns;
  constexpr size_t kBytesToRead = kNumGranules * kTagGranuleSize;

  // If the low part of the tag dump would underflow to the high address space, it's probably not
  // a valid address for us to dump tags from.
  if (fault_addr < kBytesToRead / 2) return;

  unwindstack::Memory* memory = unwinder->GetProcessMemory().get();

  constexpr uintptr_t kRowStartMask = ~(kNumTagColumns * kTagGranuleSize - 1);
  size_t start_address = (fault_addr & kRowStartMask) - kBytesToRead / 2;
  MemoryDump tag_dump;
  size_t granules_to_read = kNumGranules;

  // Attempt to read the first tag. If reading fails, this likely indicates the
  // lowest touched page is inaccessible or not marked with PROT_MTE.
  // Fast-forward over pages until one has tags, or we exhaust the search range.
  while (memory->ReadTag(start_address) < 0) {
    size_t page_size = sysconf(_SC_PAGE_SIZE);
    size_t bytes_to_next_page = page_size - (start_address % page_size);
    if (bytes_to_next_page >= granules_to_read * kTagGranuleSize) return;
    start_address += bytes_to_next_page;
    granules_to_read -= bytes_to_next_page / kTagGranuleSize;
  }
  tag_dump.set_begin_address(start_address);

  std::string* mte_tags = tag_dump.mutable_arm_mte_metadata()->mutable_memory_tags();

  for (size_t i = 0; i < granules_to_read; ++i) {
    long tag = memory->ReadTag(start_address + i * kTagGranuleSize);
    if (tag < 0) break;
    mte_tags->push_back(static_cast<uint8_t>(tag));
  }

  if (!mte_tags->empty()) {
    *signal->mutable_fault_adjacent_metadata() = tag_dump;
  }
}

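// Entry point for building the protobuf tombstone: fills in build metadata,
// process identity and uptime, signal information (including MTE tags near
// the fault address), the abort message, all thread backtraces, the probable
// cause, memory mappings, recent logcat (debuggable builds only), and open
// file descriptors.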
void engrave_tombstone_proto(Tombstone* tombstone, unwindstack::Unwinder* unwinder,
                             const std::map<pid_t, ThreadInfo>& threads, pid_t target_thread,
                             const ProcessInfo& process_info, const OpenFilesList* open_files) {
  Tombstone result;

  result.set_arch(get_arch());
  result.set_build_fingerprint(android::base::GetProperty("ro.build.fingerprint", "unknown"));
  result.set_revision(android::base::GetProperty("ro.revision", "unknown"));
  result.set_timestamp(get_timestamp());

  const ThreadInfo& main_thread = threads.at(target_thread);
  result.set_pid(main_thread.pid);
  result.set_tid(main_thread.tid);
  result.set_uid(main_thread.uid);
  result.set_selinux_label(main_thread.selinux_label);
  // The main thread must have a valid siginfo.
  CHECK(main_thread.siginfo != nullptr);

  struct sysinfo si;
  sysinfo(&si);
  android::procinfo::ProcessInfo proc_info;
  std::string error;
  if (android::procinfo::GetProcessInfo(main_thread.pid, &proc_info, &error)) {
    uint64_t starttime = proc_info.starttime / sysconf(_SC_CLK_TCK);
    result.set_process_uptime(si.uptime - starttime);
  } else {
    async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG, "failed to read process info: %s",
                          error.c_str());
  }

  auto cmd_line = result.mutable_command_line();
  for (const auto& arg : main_thread.command_line) {
    *cmd_line->Add() = arg;
  }

  if (!main_thread.siginfo) {
    async_safe_fatal("siginfo missing");
  }

  Signal sig;
  sig.set_number(main_thread.signo);
  sig.set_name(get_signame(main_thread.siginfo));
  sig.set_code(main_thread.siginfo->si_code);
  sig.set_code_name(get_sigcode(main_thread.siginfo));

  if (signal_has_sender(main_thread.siginfo, main_thread.pid)) {
    sig.set_has_sender(true);
    sig.set_sender_uid(main_thread.siginfo->si_uid);
    sig.set_sender_pid(main_thread.siginfo->si_pid);
  }

  if (process_info.has_fault_address) {
    sig.set_has_fault_address(true);
    uintptr_t fault_addr = process_info.maybe_tagged_fault_address;
    sig.set_fault_address(fault_addr);
    dump_tags_around_fault_addr(&sig, result, unwinder, fault_addr);
  }

  *result.mutable_signal_info() = sig;

  dump_abort_message(&result, unwinder, process_info);

  // Dump the main thread, but save the memory around the registers.
  dump_thread(&result, unwinder, main_thread, /* memory_dump */ true);

  for (const auto& [tid, thread_info] : threads) {
    if (tid != target_thread) {
      dump_thread(&result, unwinder, thread_info);
    }
  }

  dump_probable_cause(&result, unwinder, process_info, main_thread);

  dump_mappings(&result, unwinder);

  // Only dump logs on debuggable devices.
  if (android::base::GetBoolProperty("ro.debuggable", false)) {
    dump_logcat(&result, main_thread.pid);
  }

  dump_open_fds(&result, open_files);

  *tombstone = std::move(result);
}