/*
 * Copyright 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stddef.h>
#include <string.h>
#include <sys/ucontext.h>
#include <syscall.h>
#include <unistd.h>

#include <atomic>
#include <memory>
#include <mutex>

#include <android-base/file.h>
#include <android-base/unique_fd.h>
#include <async_safe/log.h>
#include <bionic/reserved_signals.h>
#include <unwindstack/AndroidUnwinder.h>
#include <unwindstack/Memory.h>
#include <unwindstack/Regs.h>

#include "debuggerd/handler.h"
#include "handler/fallback.h"
#include "tombstoned/tombstoned.h"
#include "util.h"

#include "libdebuggerd/backtrace.h"
#include "libdebuggerd/tombstone.h"

using android::base::unique_fd;

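// Allocator hooks provided by the dynamic linker; the comment below explains why malloc is
// swapped out while a dump is in progress.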
extern "C" bool __linker_enable_fallback_allocator();
extern "C" void __linker_disable_fallback_allocator();

// This is incredibly sketchy to do inside of a signal handler, especially when libunwindstack
// uses the C++ standard library throughout, but this code runs in the linker, so we'll be using
// the linker's malloc instead of the libc one. Switch it out for a replacement, just in case.
//
// This isn't the default method of dumping because it can fail in cases such as address space
// exhaustion.
static void debuggerd_fallback_trace(int output_fd, ucontext_t* ucontext) {
  if (!__linker_enable_fallback_allocator()) {
    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "fallback allocator already in use");
    return;
  }

  {
    std::unique_ptr<unwindstack::Regs> regs;

    ThreadInfo thread;
    thread.pid = getpid();
    thread.tid = gettid();
    thread.thread_name = get_thread_name(gettid());
    thread.registers.reset(
        unwindstack::Regs::CreateFromUcontext(unwindstack::Regs::CurrentArch(), ucontext));

    // Do not use the thread cache here because it will call pthread_key_create
    // which doesn't work in linker code. See b/189803009.
    // Use a normal cached object because the thread is stopped, and there
    // is no chance of data changing between reads.
    auto process_memory = unwindstack::Memory::CreateProcessMemoryCached(getpid());
    // TODO: Create this once and store it in a global?
    unwindstack::AndroidLocalUnwinder unwinder(process_memory);
    dump_backtrace_thread(output_fd, &unwinder, thread);
  }
  __linker_disable_fallback_allocator();
}

static void debuggerd_fallback_tombstone(int output_fd, int proto_fd, ucontext_t* ucontext,
                                         siginfo_t* siginfo, void* abort_message) {
  if (!__linker_enable_fallback_allocator()) {
    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "fallback allocator already in use");
    return;
  }

  engrave_tombstone_ucontext(output_fd, proto_fd, reinterpret_cast<uintptr_t>(abort_message),
                             siginfo, ucontext);
  __linker_disable_fallback_allocator();
}

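// Forward a sibling thread's dump from src_fd to dst_fd (the tombstoned output fd). The dumping
// thread writes its own tid first as a handshake; give up if nothing arrives within a second or
// the tid doesn't match the thread we signalled.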
static bool forward_output(int src_fd, int dst_fd, pid_t expected_tid) {
  // Make sure the thread actually got the signal.
  struct pollfd pfd = {
    .fd = src_fd, .events = POLLIN,
  };

  // Wait for up to a second for output to start flowing.
  if (poll(&pfd, 1, 1000) != 1) {
    return false;
  }

  pid_t tid;
  if (TEMP_FAILURE_RETRY(read(src_fd, &tid, sizeof(tid))) != sizeof(tid)) {
    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to read tid");
    return false;
  }

  if (tid != expected_tid) {
    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "received tid %d, expected %d", tid,
                          expected_tid);
    return false;
  }

  while (true) {
    char buf[512];
    ssize_t rc = TEMP_FAILURE_RETRY(read(src_fd, buf, sizeof(buf)));
    if (rc == 0) {
      return true;
    } else if (rc < 0) {
      return false;
    }

    if (!android::base::WriteFully(dst_fd, buf, rc)) {
      // We failed to write to tombstoned, but there's not much we can do.
      // Keep reading from src_fd to keep things going.
      continue;
    }
  }
}

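// A (tid, fd) pair packed into a single uint64_t so that it can be handed from the coordinating
// thread to the dumping thread with one lock-free atomic operation.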
struct __attribute__((__packed__)) packed_thread_output {
  int32_t tid;
  int32_t fd;
};

static uint64_t pack_thread_fd(pid_t tid, int fd) {
  packed_thread_output packed = {.tid = tid, .fd = fd};
  uint64_t result;
  static_assert(sizeof(packed) == sizeof(result));
  memcpy(&result, &packed, sizeof(packed));
  return result;
}

static std::pair<pid_t, int> unpack_thread_fd(uint64_t value) {
  packed_thread_output result;
  memcpy(&result, &value, sizeof(value));
  return std::make_pair(result.tid, result.fd);
}

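// Handles BIONIC_SIGNAL_DEBUGGER backtrace requests. If the signal carries
// kDebuggerdFallbackSivalPtrRequestDump, this thread was asked to dump itself by the coordinating
// thread below; otherwise this thread is the original recipient and coordinates a dump of every
// thread in the process.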
static void trace_handler(siginfo_t* info, ucontext_t* ucontext) {
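  // Packed (tid, fd) of the thread currently being asked to dump, or (-1, -1) when no dump is
  // outstanding.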
  static std::atomic<uint64_t> trace_output(pack_thread_fd(-1, -1));

  if (info->si_value.sival_ptr == kDebuggerdFallbackSivalPtrRequestDump) {
    // Asked to dump by the original signal recipient.
    uint64_t val = trace_output.load();
    auto [tid, fd] = unpack_thread_fd(val);
    if (tid != gettid()) {
      // We received some other thread's info request?
      async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                            "thread %d received output fd for thread %d?", gettid(), tid);
      return;
    }

    if (!trace_output.compare_exchange_strong(val, pack_thread_fd(-1, -1))) {
      // Presumably, the timeout in forward_output expired, and the main thread moved on.
      // If this happened, the main thread closed our fd for us, so just return.
      async_safe_format_log(ANDROID_LOG_ERROR, "libc", "cmpxchg for thread %d failed", gettid());
      return;
    }

    // Write our tid to the output fd to let the main thread know that we're working.
    if (TEMP_FAILURE_RETRY(write(fd, &tid, sizeof(tid))) == sizeof(tid)) {
      debuggerd_fallback_trace(fd, ucontext);
    } else {
      async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to write to output fd");
    }

    close(fd);
    return;
  }

  // Only allow one thread to perform a trace at a time.
  static std::mutex trace_mutex;
  if (!trace_mutex.try_lock()) {
    async_safe_format_log(ANDROID_LOG_INFO, "libc", "trace lock failed");
    return;
  }

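  // Adopt the lock acquired by try_lock() above so that it is released on every return path.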
  std::lock_guard<std::mutex> scoped_lock(trace_mutex, std::adopt_lock);

  // Fetch output fd from tombstoned.
  unique_fd tombstone_socket, output_fd;
  if (!tombstoned_connect(getpid(), &tombstone_socket, &output_fd, nullptr,
                          kDebuggerdNativeBacktrace)) {
    async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                          "missing crash_dump_fallback() in selinux policy?");
    return;
  }

  dump_backtrace_header(output_fd.get());

  // Dump our own stack.
  debuggerd_fallback_trace(output_fd.get(), ucontext);

  // Send a signal to all of our siblings, asking them to dump their stack.
  pid_t current_tid = gettid();
  if (!iterate_tids(current_tid, [&output_fd, &current_tid](pid_t tid) {
        if (current_tid == tid) {
          return;
        }
        // Use a pipe so that we can detect situations where the thread gracefully exits before
        // receiving our signal.
        unique_fd pipe_read, pipe_write;
        if (!Pipe(&pipe_read, &pipe_write)) {
          async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to create pipe: %s",
                                strerror(errno));
          return;
        }

        uint64_t expected = pack_thread_fd(-1, -1);
        int sent_fd = pipe_write.release();
        if (!trace_output.compare_exchange_strong(expected, pack_thread_fd(tid, sent_fd))) {
          auto [tid, fd] = unpack_thread_fd(expected);
          async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                                "thread %d is already outputting to fd %d?", tid, fd);
          close(sent_fd);
          return;
        }

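        // Queue BIONIC_SIGNAL_DEBUGGER directly on the target thread. A raw rt_tgsigqueueinfo
        // syscall is used here since sigqueue(3) can only target a whole process, not a single
        // tid.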
        siginfo_t siginfo = {};
        siginfo.si_code = SI_QUEUE;
        siginfo.si_value.sival_ptr = kDebuggerdFallbackSivalPtrRequestDump;
        siginfo.si_pid = getpid();
        siginfo.si_uid = getuid();

        if (syscall(__NR_rt_tgsigqueueinfo, getpid(), tid, BIONIC_SIGNAL_DEBUGGER, &siginfo) != 0) {
          async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to send trace signal to %d: %s",
                                tid, strerror(errno));
          return;
        }

        bool success = forward_output(pipe_read.get(), output_fd.get(), tid);
        if (!success) {
          async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                                "timeout expired while waiting for thread %d to dump", tid);
        }

        // Regardless of whether the poll succeeds, check to see if the thread took fd ownership.
        uint64_t post_wait = trace_output.exchange(pack_thread_fd(-1, -1));
        if (post_wait != pack_thread_fd(-1, -1)) {
          auto [tid, fd] = unpack_thread_fd(post_wait);
          if (fd != -1) {
            async_safe_format_log(ANDROID_LOG_ERROR, "libc", "closing fd %d for thread %d", fd, tid);
            close(fd);
          }
        }

        return;
      })) {
    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to open /proc/%d/task: %s",
                          current_tid, strerror(errno));
  }

  dump_backtrace_footer(output_fd.get());
  tombstoned_notify_completion(tombstone_socket.get());
}

static void crash_handler(siginfo_t* info, ucontext_t* ucontext, void* abort_message) {
  // Only allow one thread to handle a crash at a time (this can happen multiple times without
  // exit, since tombstones can be requested without a real crash happening).
  static std::recursive_mutex crash_mutex;
  static int lock_count;

  crash_mutex.lock();
  if (lock_count++ > 0) {
    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "recursed signal handler call, aborting");
    signal(SIGABRT, SIG_DFL);
    raise(SIGABRT);
    sigset_t sigset;
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGABRT);
    sigprocmask(SIG_UNBLOCK, &sigset, nullptr);

    // Just in case...
    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "abort didn't exit, exiting");
    _exit(1);
  }

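  // Connect to tombstoned for output fds; the tombstone is engraved regardless, and completion is
  // only reported back if the connection succeeded.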
  unique_fd tombstone_socket, output_fd, proto_fd;
  bool tombstoned_connected = tombstoned_connect(getpid(), &tombstone_socket, &output_fd, &proto_fd,
                                                 kDebuggerdTombstoneProto);
  debuggerd_fallback_tombstone(output_fd.get(), proto_fd.get(), ucontext, info, abort_message);
  if (tombstoned_connected) {
    tombstoned_notify_completion(tombstone_socket.get());
  }

  --lock_count;
  crash_mutex.unlock();
}

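// Entry point for the fallback path: a debugger signal that carries a non-null sival_ptr is a
// backtrace request, anything else is treated as a crash.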
extern "C" void debuggerd_fallback_handler(siginfo_t* info, ucontext_t* ucontext,
                                           void* abort_message) {
  if (info->si_signo == BIONIC_SIGNAL_DEBUGGER && info->si_value.sival_ptr != nullptr) {
    return trace_handler(info, ucontext);
  } else {
    return crash_handler(info, ucontext, abort_message);
  }
}