1 /*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define _GNU_SOURCE 1
18 #include <dirent.h>
19 #include <dlfcn.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/ptrace.h>
30 #include <sys/stat.h>
31 #include <sys/types.h>
32 #include <sys/wait.h>
33 #include <time.h>
34 #include <unistd.h>
35
36 #include <algorithm>
37 #include <list>
38 #include <memory>
39 #include <ostream>
40 #include <string>
41 #include <vector>
42
43 #include <backtrace/Backtrace.h>
44 #include <backtrace/BacktraceMap.h>
45
46 #include <android-base/macros.h>
47 #include <android-base/stringprintf.h>
48 #include <android-base/unique_fd.h>
49 #include <cutils/atomic.h>
50 #include <cutils/threads.h>
51
52 #include <gtest/gtest.h>
53
54 // For the THREAD_SIGNAL definition.
55 #include "BacktraceCurrent.h"
56 #include "backtrace_testlib.h"
57 #include "thread_utils.h"
58
59 // Number of microseconds per milliseconds.
60 #define US_PER_MSEC 1000
61
62 // Number of nanoseconds in a second.
63 #define NS_PER_SEC 1000000000ULL
64
65 // Number of simultaneous dumping operations to perform.
66 #define NUM_THREADS 40
67
68 // Number of simultaneous threads running in our forked process.
69 #define NUM_PTRACE_THREADS 5
70
// Per-thread bookkeeping shared between a test and the thread it spawns.
struct thread_t {
  pid_t tid;           // Kernel thread id; set by the thread itself via gettid().
  int32_t state;       // Handshake flag, toggled with android atomic ops.
  pthread_t threadId;  // pthread handle returned by pthread_create().
  void* data;          // Optional per-thread payload (e.g. a test buffer).
};
77
// State for one "dumper" thread that unwinds another thread on demand.
struct dump_thread_t {
  thread_t thread;       // thread.tid is the target tid this dumper unwinds.
  Backtrace* backtrace;  // Unwind result; deleted by the test that reads it.
  int32_t* now;          // Shared "go" flag; the dumper spins until non-zero.
  int32_t done;          // Set to 1 by the dumper once its unwind finished.
};
84
// Returns the current CLOCK_MONOTONIC time in nanoseconds.
static uint64_t NanoTime() {
  constexpr uint64_t kNsPerSec = 1000000000ULL;  // same value as NS_PER_SEC
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<uint64_t>(ts.tv_sec) * kNsPerSec + static_cast<uint64_t>(ts.tv_nsec);
}
90
DumpFrames(Backtrace * backtrace)91 static std::string DumpFrames(Backtrace* backtrace) {
92 if (backtrace->NumFrames() == 0) {
93 return " No frames to dump.\n";
94 }
95
96 std::string frame;
97 for (size_t i = 0; i < backtrace->NumFrames(); i++) {
98 frame += " " + backtrace->FormatFrameData(i) + '\n';
99 }
100 return frame;
101 }
102
WaitForStop(pid_t pid)103 static void WaitForStop(pid_t pid) {
104 uint64_t start = NanoTime();
105
106 siginfo_t si;
107 while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
108 if ((NanoTime() - start) > NS_PER_SEC) {
109 printf("The process did not get to a stopping point in 1 second.\n");
110 break;
111 }
112 usleep(US_PER_MSEC);
113 }
114 }
115
// Forks a child that spins forever, ptrace-attaches to it, and waits for it
// to reach a ptrace stop. On return *pid holds the child's pid. Must be
// paired with FinishRemoteProcess() to detach and reap the child.
static void CreateRemoteProcess(pid_t* pid) {
  if ((*pid = fork()) == 0) {
    // Child: spin so the parent can attach at any point. The _exit() below
    // is unreachable; the child only dies when the parent SIGKILLs it.
    while (true)
      ;
    _exit(0);
  }
  ASSERT_NE(-1, *pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, *pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(*pid);
}
129
// Detaches from a process created by CreateRemoteProcess(), kills it, and
// reaps it so no zombie is left behind.
static void FinishRemoteProcess(pid_t pid) {
  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
136
ReadyLevelBacktrace(Backtrace * backtrace)137 static bool ReadyLevelBacktrace(Backtrace* backtrace) {
138 // See if test_level_four is in the backtrace.
139 bool found = false;
140 for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
141 if (it->func_name == "test_level_four") {
142 found = true;
143 break;
144 }
145 }
146
147 return found;
148 }
149
// Asserts that |backtrace| contains the chain
// test_level_one -> test_level_two -> test_level_three -> test_level_four
// in four consecutive frames.
static void VerifyLevelDump(Backtrace* backtrace) {
  ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0))
    << DumpFrames(backtrace);
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
    if (backtrace->GetFrame(i)->func_name == "test_level_one") {
      frame_num = i;
      break;
    }
  }
  // frame_num stays 0 if test_level_one was not found; it must also be at
  // least 3 so the three callee frames checked below actually exist.
  ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
  ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);

  ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four")
    << DumpFrames(backtrace);
}
177
VerifyLevelBacktrace(void *)178 static void VerifyLevelBacktrace(void*) {
179 std::unique_ptr<Backtrace> backtrace(
180 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
181 ASSERT_TRUE(backtrace.get() != nullptr);
182 ASSERT_TRUE(backtrace->Unwind(0));
183 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
184
185 VerifyLevelDump(backtrace.get());
186 }
187
ReadyMaxBacktrace(Backtrace * backtrace)188 static bool ReadyMaxBacktrace(Backtrace* backtrace) {
189 return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
190 }
191
// Asserts that the unwind was truncated at exactly MAX_BACKTRACE_FRAMES and
// that the deepest reported frame is the recursive test function.
static void VerifyMaxDump(Backtrace* backtrace) {
  ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);
  // Verify that the last frame is our recursive call.
  ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name, "test_recursive_call")
    << DumpFrames(backtrace);
}
199
VerifyMaxBacktrace(void *)200 static void VerifyMaxBacktrace(void*) {
201 std::unique_ptr<Backtrace> backtrace(
202 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
203 ASSERT_TRUE(backtrace.get() != nullptr);
204 ASSERT_TRUE(backtrace->Unwind(0));
205 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
206
207 VerifyMaxDump(backtrace.get());
208 }
209
// Callback run at the bottom of the test call chain: publishes that this
// thread is in position (state = 1), then spins until the test clears
// state back to 0.
static void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  // The volatile counter keeps the busy-wait from being optimized away.
  // NOTE(review): state is read here without an atomic load — presumably
  // relies on the plain int32_t read eventually observing the store.
  volatile int i = 0;
  while (thread->state) {
    i++;
  }
}
218
// Polls *value with acquire semantics until it becomes non-zero or |seconds|
// have elapsed. Returns true if a non-zero value was observed. The do/while
// guarantees the value is checked at least once, even when seconds == 0.
static bool WaitForNonZero(int32_t* value, uint64_t seconds) {
  uint64_t start = NanoTime();
  do {
    if (android_atomic_acquire_load(value)) {
      return true;
    }
  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
  return false;
}
228
TEST(libbacktrace, local_no_unwind_frames) {
  // Verify that a local unwind does not include any frames within
  // libunwind or libbacktrace.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

  ASSERT_TRUE(backtrace->NumFrames() != 0);
  // Check every frame. The unconditional break that used to end this loop
  // after one iteration meant only frame 0 was ever inspected, which
  // defeated the purpose of the test.
  for (const auto& frame : *backtrace) {
    if (BacktraceMap::IsValid(frame.map)) {
      const std::string name = basename(frame.map.name.c_str());
      ASSERT_TRUE(name != "libunwind.so" && name != "libbacktrace.so")
        << DumpFrames(backtrace.get());
    }
  }
}
247
TEST(libbacktrace, local_trace) {
  // test_level_one() descends four calls deep and then invokes the verifier;
  // it returns non-zero on success.
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
}
251
// Checks that unwinding with num_ignore_frames = 1 and 2 strips exactly one
// and two frames respectively, and that the remaining frames line up across
// the three unwinds. When |cur_proc| is non-null, per-frame comparison only
// starts once that function name has been seen in bt_ign2 (frames above it
// may legitimately differ between the separate unwind calls).
static void VerifyIgnoreFrames(Backtrace* bt_all, Backtrace* bt_ign1, Backtrace* bt_ign2,
                               const char* cur_proc) {
  EXPECT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1)
    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 1 backtrace:\n" << DumpFrames(bt_ign1);
  EXPECT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2)
    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 2 backtrace:\n" << DumpFrames(bt_ign2);

  // Check all of the frames are the same > the current frame.
  bool check = (cur_proc == nullptr);
  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
    if (check) {
      // Frame i of the ignore-2 unwind should match frame i+1 of the
      // ignore-1 unwind and frame i+2 of the full unwind.
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);

      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
    }
    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
      check = true;
    }
  }
}
276
VerifyLevelIgnoreFrames(void *)277 static void VerifyLevelIgnoreFrames(void*) {
278 std::unique_ptr<Backtrace> all(
279 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
280 ASSERT_TRUE(all.get() != nullptr);
281 ASSERT_TRUE(all->Unwind(0));
282 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, all->GetError());
283
284 std::unique_ptr<Backtrace> ign1(
285 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
286 ASSERT_TRUE(ign1.get() != nullptr);
287 ASSERT_TRUE(ign1->Unwind(1));
288 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError());
289
290 std::unique_ptr<Backtrace> ign2(
291 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
292 ASSERT_TRUE(ign2.get() != nullptr);
293 ASSERT_TRUE(ign2->Unwind(2));
294 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError());
295
296 VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
297 }
298
TEST(libbacktrace, local_trace_ignore_frames) {
  // Exercises Unwind() with non-zero num_ignore_frames from deep in a chain.
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
}
302
TEST(libbacktrace, local_max_trace) {
  // Recurse past the frame cap so the unwind must truncate.
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, nullptr), 0);
}
306
// Repeatedly (for up to 5 seconds) attaches to pid/tid, unwinds it, and once
// ReadyFunc() reports the target is at the expected spot runs VerifyFunc()
// on the backtrace. When share_map is set the unwind goes through an
// explicitly created BacktraceMap. A negative tid means "the process itself"
// is the ptrace target.
static void VerifyProcTest(pid_t pid, pid_t tid, bool share_map, bool (*ReadyFunc)(Backtrace*),
                           void (*VerifyFunc)(Backtrace*)) {
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  std::string last_dump;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      std::unique_ptr<BacktraceMap> map;
      if (share_map) {
        map.reset(BacktraceMap::Create(pid));
      }
      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map.get()));
      ASSERT_TRUE(backtrace.get() != nullptr);
      ASSERT_TRUE(backtrace->Unwind(0));
      ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
      if (ReadyFunc(backtrace.get())) {
        VerifyFunc(backtrace.get());
        verified = true;
      } else {
        // Keep the most recent dump so a timeout failure shows context.
        last_dump = DumpFrames(backtrace.get());
      }

      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump;
}
345
TEST(libbacktrace, ptrace_trace) {
  // Fork a child running the nested test calls, then unwind it remotely
  // until the expected call chain shows up.
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
358
TEST(libbacktrace, ptrace_trace_shared_map) {
  // Same as ptrace_trace, but the unwind uses an externally created
  // BacktraceMap (share_map = true).
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, true, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
372
TEST(libbacktrace, ptrace_max_trace) {
  // Remote variant of local_max_trace: the child recurses past the frame
  // cap and the parent verifies the truncated unwind.
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyMaxBacktrace, VerifyMaxDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
385
VerifyProcessIgnoreFrames(Backtrace * bt_all)386 static void VerifyProcessIgnoreFrames(Backtrace* bt_all) {
387 std::unique_ptr<Backtrace> ign1(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
388 ASSERT_TRUE(ign1.get() != nullptr);
389 ASSERT_TRUE(ign1->Unwind(1));
390 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError());
391
392 std::unique_ptr<Backtrace> ign2(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
393 ASSERT_TRUE(ign2.get() != nullptr);
394 ASSERT_TRUE(ign2->Unwind(2));
395 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError());
396
397 VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
398 }
399
TEST(libbacktrace, ptrace_ignore_frames) {
  // Remote variant of local_trace_ignore_frames.
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
412
413 // Create a process with multiple threads and dump all of the threads.
// Thread body for the forked child in ptrace_threads: runs the nested test
// calls. EXPECT (not ASSERT) is used because gtest ASSERTs require a void
// return type.
static void* PtraceThreadLevelRun(void*) {
  EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
  return nullptr;
}
418
GetThreads(pid_t pid,std::vector<pid_t> * threads)419 static void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
420 // Get the list of tasks.
421 char task_path[128];
422 snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);
423
424 std::unique_ptr<DIR, decltype(&closedir)> tasks_dir(opendir(task_path), closedir);
425 ASSERT_TRUE(tasks_dir != nullptr);
426 struct dirent* entry;
427 while ((entry = readdir(tasks_dir.get())) != nullptr) {
428 char* end;
429 pid_t tid = strtoul(entry->d_name, &end, 10);
430 if (*end == '\0') {
431 threads->push_back(tid);
432 }
433 }
434 }
435
TEST(libbacktrace, ptrace_threads) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    // Child: start NUM_PTRACE_THREADS detached threads, all running the
    // nested test calls, plus the main thread doing the same.
    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t thread;
      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
    }
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  // Check to see that all of the threads are running before unwinding.
  std::vector<pid_t> threads;
  uint64_t start = NanoTime();
  do {
    usleep(US_PER_MSEC);
    threads.clear();
    GetThreads(pid, &threads);
  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
      ((NanoTime() - start) <= 5 * NS_PER_SEC));
  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
  WaitForStop(pid);
  // Fix: iterate using the container's element type. The previous
  // std::vector<int>::const_iterator only compiled because pid_t happens
  // to be int on this platform.
  for (pid_t tid : threads) {
    // Skip the current forked process, we only care about the threads.
    if (pid == tid) {
      continue;
    }
    VerifyProcTest(pid, tid, false, ReadyLevelBacktrace, VerifyLevelDump);
  }

  FinishRemoteProcess(pid);
}
474
VerifyLevelThread(void *)475 void VerifyLevelThread(void*) {
476 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
477 ASSERT_TRUE(backtrace.get() != nullptr);
478 ASSERT_TRUE(backtrace->Unwind(0));
479 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
480
481 VerifyLevelDump(backtrace.get());
482 }
483
TEST(libbacktrace, thread_current_level) {
  // Same as local_trace but exercising the explicit pid/tid Create() path.
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
}
487
VerifyMaxThread(void *)488 static void VerifyMaxThread(void*) {
489 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
490 ASSERT_TRUE(backtrace.get() != nullptr);
491 ASSERT_TRUE(backtrace->Unwind(0));
492 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
493
494 VerifyMaxDump(backtrace.get());
495 }
496
TEST(libbacktrace, thread_current_max) {
  // Same as local_max_trace but exercising the explicit pid/tid Create() path.
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, nullptr), 0);
}
500
// Thread body: publishes its tid, then descends the nested test calls with
// ThreadSetState as the final callback so the thread parks at the bottom of
// the chain until the test clears thread->state.
static void* ThreadLevelRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  // EXPECT (not ASSERT) because gtest ASSERTs need a void return type.
  EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
  return nullptr;
}
508
TEST(libbacktrace, thread_level_trace) {
  // Unwind a different thread of this process. The cross-thread unwind is
  // signal based (THREAD_SIGNAL), so this test also verifies that the
  // signal handler in place beforehand is restored afterwards.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#if defined(SA_RESTORER)
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#elif defined(__GLIBC__)
  // Our host compiler doesn't appear to define this flag for some reason.
  cur_action.sa_flags &= ~0x04000000;
  new_action.sa_flags &= ~0x04000000;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}
558
TEST(libbacktrace, thread_ignore_frames) {
  // Cross-thread variant of local_trace_ignore_frames: three unwinds of a
  // parked sibling thread, skipping 0, 1, and 2 frames.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, all->GetError());

  std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError());

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError());

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
591
// Thread body: publishes its tid, then recurses past the frame cap with
// ThreadSetState as the final callback so the thread parks until released.
static void* ThreadMaxRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  // EXPECT (not ASSERT) because gtest ASSERTs need a void return type.
  EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
  return nullptr;
}
599
TEST(libbacktrace, thread_max_trace) {
  // Cross-thread variant of local_max_trace.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

  VerifyMaxDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
622
// Dumper thread body: spins until *dump->now becomes non-zero so that all
// dumpers start their unwinds at the same moment, then unwinds the target
// tid and signals completion through dump->done.
static void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  // The status of the actual unwind will be checked elsewhere.
  // NOTE(review): Create() is assumed non-null here and dereferenced
  // immediately; the owning test verifies the pointer only afterwards.
  dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid);
  dump->backtrace->Unwind(0);

  android_atomic_acquire_store(1, &dump->done);

  return nullptr;
}
639
TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously: one runner (spinning target) and one
  // dumper per pair, with all dumpers released at the same instant.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    // The Backtrace was heap-allocated by ThreadDump; free it here.
    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}
688
TEST(libbacktrace, thread_multiple_dump_same_thread) {
  // Like thread_multiple_dump, but every dumper targets the SAME runner
  // thread, stressing concurrent unwinds of a single tid.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  thread_t runner;
  runner.tid = 0;
  runner.state = 0;
  ASSERT_TRUE(pthread_create(&runner.threadId, &attr, ThreadMaxRun, &runner) == 0);

  // Wait for tids to be set.
  ASSERT_TRUE(WaitForNonZero(&runner.state, 30));

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  // Dump the same thread NUM_THREADS simultaneously.
  std::vector<dump_thread_t> dumpers(NUM_THREADS);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runner.tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    // The Backtrace was heap-allocated by ThreadDump; free it here.
    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }

  // Tell the runner thread to exit its infinite loop.
  android_atomic_acquire_store(0, &runner.state);
}
731
732 // This test is for UnwindMaps that should share the same map cursor when
733 // multiple maps are created for the current process at the same time.
TEST(libbacktrace, simultaneous_maps) {
  // unique_ptr keeps the maps and backtraces from leaking when an ASSERT
  // fires (gtest ASSERTs return early out of the test body); the previous
  // raw-pointer/delete version leaked on every failure path.
  std::unique_ptr<BacktraceMap> map1(BacktraceMap::Create(getpid()));
  std::unique_ptr<BacktraceMap> map2(BacktraceMap::Create(getpid()));
  std::unique_ptr<BacktraceMap> map3(BacktraceMap::Create(getpid()));

  std::unique_ptr<Backtrace> back1(
      Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1.get()));
  ASSERT_TRUE(back1 != nullptr);
  EXPECT_TRUE(back1->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back1->GetError());
  // Destroy in the original order: backtrace first, then its map.
  back1.reset();
  map1.reset();

  std::unique_ptr<Backtrace> back2(
      Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2.get()));
  ASSERT_TRUE(back2 != nullptr);
  EXPECT_TRUE(back2->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back2->GetError());
  back2.reset();
  map2.reset();

  std::unique_ptr<Backtrace> back3(
      Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3.get()));
  ASSERT_TRUE(back3 != nullptr);
  EXPECT_TRUE(back3->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back3->GetError());
  back3.reset();
  map3.reset();
}
760
TEST(libbacktrace, fillin_erases) {
  // FillIn() on a non-matching address must clear every field of the
  // passed-in map so stale data cannot leak through.
  // unique_ptr replaces the raw new/delete pair for leak safety and for
  // consistency with the rest of this file.
  std::unique_ptr<BacktraceMap> back_map(BacktraceMap::Create(getpid()));

  backtrace_map_t map;

  map.start = 1;
  map.end = 3;
  map.flags = 1;
  map.name = "Initialized";
  back_map->FillIn(0, &map);
  back_map.reset();

  ASSERT_FALSE(BacktraceMap::IsValid(map));
  ASSERT_EQ(static_cast<uintptr_t>(0), map.start);
  ASSERT_EQ(static_cast<uintptr_t>(0), map.end);
  ASSERT_EQ(0, map.flags);
  ASSERT_EQ("", map.name);
}
779
TEST(libbacktrace, format_test) {
  // Exercises FormatFrameData() over every formatting case, building the
  // frame up field by field. The 32/64-bit variants differ only in the
  // width of the printed pc.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002 <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002 <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.pc = 0xb0020;
  frame.map.start = 0xb0000;
  frame.map.end = 0xbffff;
  frame.map.load_base = 0;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020 <anonymous:00000000000b0000>",
#else
  EXPECT_EQ("#01 pc 00000020 <anonymous:000b0000>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name begins with a [.
  frame.pc = 0xc0020;
  frame.map.start = 0xc0000;
  frame.map.end = 0xcffff;
  frame.map.load_base = 0;
  frame.map.name = "[anon:thread signal stack]";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020 [anon:thread signal stack:00000000000c0000]",
#else
  EXPECT_EQ("#01 pc 00000020 [anon:thread signal stack:000c0000]",
#endif
            backtrace->FormatFrameData(&frame));

  // Check relative pc is set and map name is set.
  frame.pc = 0x12345679;
  frame.map.name = "MapFake";
  frame.map.start = 1;
  frame.map.end = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678 MapFake",
#else
  EXPECT_EQ("#01 pc 12345678 MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, func offset is non-zero, and load_base is non-zero.
  frame.func_offset = 645;
  frame.map.load_base = 100;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check a non-zero map offset.
  frame.map.offset = 0x1000;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc MapFake (offset 0x1000) (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc MapFake (offset 0x1000) (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}
874
// Minimal start/end record parsed from /proc/<pid>/maps, for comparison
// against BacktraceMap entries.
struct map_test_t {
  uintptr_t start;
  uintptr_t end;
};
879
map_sort(map_test_t i,map_test_t j)880 static bool map_sort(map_test_t i, map_test_t j) { return i.start < j.start; }
881
// Parses /proc/<pid>/maps directly and asserts that BacktraceMap reports
// exactly the same regions, in the same (sorted) order.
static void VerifyMap(pid_t pid) {
  char buffer[4096];
  snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);

  FILE* map_file = fopen(buffer, "r");
  ASSERT_TRUE(map_file != nullptr);
  std::vector<map_test_t> test_maps;
  while (fgets(buffer, sizeof(buffer), map_file)) {
    map_test_t map;
    ASSERT_EQ(2, sscanf(buffer, "%" SCNxPTR "-%" SCNxPTR " ", &map.start, &map.end));
    test_maps.push_back(map);
  }
  fclose(map_file);
  std::sort(test_maps.begin(), test_maps.end(), map_sort);

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Basic test that verifies that the map is in the expected order.
  ScopedBacktraceMapIteratorLock lock(map.get());
  std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    ASSERT_TRUE(test_it != test_maps.end());
    ASSERT_EQ(test_it->start, it->start);
    ASSERT_EQ(test_it->end, it->end);
    ++test_it;
  }
  // Both iterations must be exhausted together: no extra /proc entries.
  ASSERT_TRUE(test_it == test_maps.end());
}
910
TEST(libbacktrace, verify_map_remote) {
  // Fork and stop a child so its maps cannot change while we compare them.
  pid_t remote_pid;
  CreateRemoteProcess(&remote_pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(remote_pid);

  FinishRemoteProcess(remote_pid);
}
920
// Fills the buffer with a deterministic, NUL-free byte pattern so reads
// can be verified and overreads detected with a '\0' sentinel.
static void InitMemory(uint8_t* memory, size_t bytes) {
  for (size_t idx = 0; idx < bytes; idx++) {
    uint8_t value = static_cast<uint8_t>(idx);
    // Don't use '\0' in our data so we can verify that an overread doesn't
    // occur by using a '\0' as the character after the read data.
    memory[idx] = (value == '\0') ? 23 : value;
  }
}
931
// Thread body for the thread_read test: sets up a two-page buffer whose
// second page is unreadable, publishes the buffer pointer and its tid via
// thread_data, then waits for the main thread to finish reading before
// cleaning up. Returns (void*)-1 on setup failure, nullptr on success.
static void* ThreadReadTest(void* data) {
  thread_t* thread_data = reinterpret_cast<thread_t*>(data);

  thread_data->tid = gettid();

  // Create two map pages.
  // Mark the second page as not-readable.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  thread_data->data = memory;

  // Tell the caller it's okay to start reading memory.
  android_atomic_acquire_store(1, &thread_data->state);

  // Loop waiting for the caller to finish reading the memory.
  while (thread_data->state) {
  }

  // Re-enable read-write on the page so that we don't crash if we try
  // and access data on this page when freeing the memory.
  if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
    return reinterpret_cast<void*>(-1);
  }
  free(memory);

  // Signal the caller that cleanup is complete and the thread may exit.
  android_atomic_acquire_store(1, &thread_data->state);

  return nullptr;
}
972
RunReadTest(Backtrace * backtrace,uintptr_t read_addr)973 static void RunReadTest(Backtrace* backtrace, uintptr_t read_addr) {
974 size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
975
976 // Create a page of data to use to do quick compares.
977 uint8_t* expected = new uint8_t[pagesize];
978 InitMemory(expected, pagesize);
979
980 uint8_t* data = new uint8_t[2*pagesize];
981 // Verify that we can only read one page worth of data.
982 size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
983 ASSERT_EQ(pagesize, bytes_read);
984 ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);
985
986 // Verify unaligned reads.
987 for (size_t i = 1; i < sizeof(word_t); i++) {
988 bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
989 ASSERT_EQ(2 * sizeof(word_t), bytes_read);
990 ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
991 << "Offset at " << i << " failed";
992 }
993
994 // Verify small unaligned reads.
995 for (size_t i = 1; i < sizeof(word_t); i++) {
996 for (size_t j = 1; j < sizeof(word_t); j++) {
997 // Set one byte past what we expect to read, to guarantee we don't overread.
998 data[j] = '\0';
999 bytes_read = backtrace->Read(read_addr + i, data, j);
1000 ASSERT_EQ(j, bytes_read);
1001 ASSERT_TRUE(memcmp(data, &expected[i], j) == 0)
1002 << "Offset at " << i << " length " << j << " miscompared";
1003 ASSERT_EQ('\0', data[j])
1004 << "Offset at " << i << " length " << j << " wrote too much data";
1005 }
1006 }
1007 delete[] data;
1008 delete[] expected;
1009 }
1010
TEST(libbacktrace, thread_read) {
  // Spawn a detached thread that builds the two-page read buffer.
  pthread_attr_t thread_attr;
  pthread_attr_init(&thread_attr);
  pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
  pthread_t reader;
  thread_t shared = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&reader, &thread_attr, ThreadReadTest, &shared) == 0);

  // Wait for the thread to publish its tid and buffer pointer.
  ASSERT_TRUE(WaitForNonZero(&shared.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), shared.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uintptr_t>(shared.data));

  // Tell the thread we are done reading, then wait for its acknowledgement.
  android_atomic_acquire_store(0, &shared.state);

  ASSERT_TRUE(WaitForNonZero(&shared.state, 10));
}
1030
// Handshake globals shared with forked children (fork copies the address
// space, so the parent can read them remotely at the same addresses): the
// child stores its buffer address in g_addr, then sets g_ready to 1.
volatile uintptr_t g_ready = 0;
volatile uintptr_t g_addr = 0;
1033
ForkedReadTest()1034 static void ForkedReadTest() {
1035 // Create two map pages.
1036 size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
1037 uint8_t* memory;
1038 if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
1039 perror("Failed to allocate memory\n");
1040 exit(1);
1041 }
1042
1043 // Mark the second page as not-readable.
1044 if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
1045 perror("Failed to mprotect memory\n");
1046 exit(1);
1047 }
1048
1049 // Set up a simple pattern in memory.
1050 InitMemory(memory, pagesize);
1051
1052 g_addr = reinterpret_cast<uintptr_t>(memory);
1053 g_ready = 1;
1054
1055 while (1) {
1056 usleep(US_PER_MSEC);
1057 }
1058 }
1059
// Remote-read test: forks a child running ForkedReadTest(), attaches with
// ptrace, reads the child's g_ready/g_addr through Backtrace::Read() (the
// globals have the same addresses in the child because fork copies the
// address space), then runs the full read test on the child's buffer.
// Gives up after 5 seconds if the child never becomes ready.
TEST(libbacktrace, process_read) {
  g_ready = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
      ASSERT_TRUE(backtrace.get() != nullptr);

      uintptr_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr),
                                          sizeof(uintptr_t));
      ASSERT_EQ(sizeof(uintptr_t), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr),
                                     sizeof(uintptr_t));
        ASSERT_EQ(sizeof(uintptr_t), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      // Not ready yet: detach so the child can keep running.
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    // Time out rather than spinning forever if the child never gets ready.
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}
1107
VerifyFunctionsFound(const std::vector<std::string> & found_functions)1108 static void VerifyFunctionsFound(const std::vector<std::string>& found_functions) {
1109 // We expect to find these functions in libbacktrace_test. If we don't
1110 // find them, that's a bug in the memory read handling code in libunwind.
1111 std::list<std::string> expected_functions;
1112 expected_functions.push_back("test_recursive_call");
1113 expected_functions.push_back("test_level_one");
1114 expected_functions.push_back("test_level_two");
1115 expected_functions.push_back("test_level_three");
1116 expected_functions.push_back("test_level_four");
1117 for (const auto& found_function : found_functions) {
1118 for (const auto& expected_function : expected_functions) {
1119 if (found_function == expected_function) {
1120 expected_functions.remove(found_function);
1121 break;
1122 }
1123 }
1124 }
1125 ASSERT_TRUE(expected_functions.empty()) << "Not all functions found in shared library.";
1126 }
1127
CopySharedLibrary()1128 static const char* CopySharedLibrary() {
1129 #if defined(__LP64__)
1130 const char* lib_name = "lib64";
1131 #else
1132 const char* lib_name = "lib";
1133 #endif
1134
1135 #if defined(__BIONIC__)
1136 const char* tmp_so_name = "/data/local/tmp/libbacktrace_test.so";
1137 std::string cp_cmd = android::base::StringPrintf("cp /system/%s/libbacktrace_test.so %s",
1138 lib_name, tmp_so_name);
1139 #else
1140 const char* tmp_so_name = "/tmp/libbacktrace_test.so";
1141 if (getenv("ANDROID_HOST_OUT") == NULL) {
1142 fprintf(stderr, "ANDROID_HOST_OUT not set, make sure you run lunch.");
1143 return nullptr;
1144 }
1145 std::string cp_cmd = android::base::StringPrintf("cp %s/%s/libbacktrace_test.so %s",
1146 getenv("ANDROID_HOST_OUT"), lib_name,
1147 tmp_so_name);
1148 #endif
1149
1150 // Copy the shared so to a tempory directory.
1151 system(cp_cmd.c_str());
1152
1153 return tmp_so_name;
1154 }
1155
// Maps a copy of the test library, unlinks the file on disk, and checks
// that GetFunctionName() can still find the expected functions by scanning
// every 4-byte-aligned address of the mapped region.
TEST(libbacktrace, check_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uintptr_t map_size = buf.st_size;

  int fd = open(tmp_so_name, O_RDONLY);
  ASSERT_TRUE(fd != -1);

  void* map = mmap(NULL, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
  ASSERT_TRUE(map != MAP_FAILED);
  close(fd);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  std::vector<std::string> found_functions;
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
                                                         BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Needed before GetFunctionName will work.
  backtrace->Unwind(0);

  // Loop through the entire map, and get every function we can find.
  // NOTE: after this line, map_size holds the END address of the mapping,
  // not its size.
  map_size += reinterpret_cast<uintptr_t>(map);
  std::string last_func;
  for (uintptr_t read_addr = reinterpret_cast<uintptr_t>(map);
       read_addr < map_size; read_addr += 4) {
    uintptr_t offset;
    std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
    if (!func_name.empty() && last_func != func_name) {
      found_functions.push_back(func_name);
    }
    last_func = func_name;
  }

  // map_size - map recovers the original mapping length.
  ASSERT_TRUE(munmap(map, map_size - reinterpret_cast<uintptr_t>(map)) == 0);

  VerifyFunctionsFound(found_functions);
}
1197
// Remote version of check_unreadable_elf_local: a forked child maps the
// test library, unlinks the file, and publishes the map address through
// g_addr/g_ready (valid in the parent because fork copies the address
// space). The parent attaches with ptrace and scans the child's mapping
// through GetFunctionName(). Gives up after 5 seconds.
TEST(libbacktrace, check_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  g_ready = 0;

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uintptr_t map_size = buf.st_size;

  pid_t pid;
  if ((pid = fork()) == 0) {
    int fd = open(tmp_so_name, O_RDONLY);
    if (fd == -1) {
      fprintf(stderr, "Failed to open file %s: %s\n", tmp_so_name, strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }

    void* map = mmap(NULL, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
    if (map == MAP_FAILED) {
      fprintf(stderr, "Failed to map in memory: %s\n", strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }
    close(fd);
    if (unlink(tmp_so_name) == -1) {
      fprintf(stderr, "Failed to unlink: %s\n", strerror(errno));
      exit(0);
    }

    g_addr = reinterpret_cast<uintptr_t>(map);
    g_ready = 1;
    // Spin until the parent kills this process.
    while (true) {
      usleep(US_PER_MSEC);
    }
    exit(0);
  }
  ASSERT_TRUE(pid > 0);

  std::vector<std::string> found_functions;
  uint64_t start = NanoTime();
  while (true) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);

    uintptr_t read_addr;
    ASSERT_EQ(sizeof(uintptr_t), backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready), reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t)));
    if (read_addr) {
      // The child is ready; fetch the address of its mapping.
      ASSERT_EQ(sizeof(uintptr_t), backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr), reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t)));

      // Needed before GetFunctionName will work.
      backtrace->Unwind(0);

      // Loop through the entire map, and get every function we can find.
      // NOTE: after this line, map_size holds the END address of the
      // child's mapping, not its size.
      map_size += read_addr;
      std::string last_func;
      for (; read_addr < map_size; read_addr += 4) {
        uintptr_t offset;
        std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
        if (!func_name.empty() && last_func != func_name) {
          found_functions.push_back(func_name);
        }
        last_func = func_name;
      }
      break;
    }
    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  VerifyFunctionsFound(found_functions);
}
1283
FindFuncFrameInBacktrace(Backtrace * backtrace,uintptr_t test_func,size_t * frame_num)1284 static bool FindFuncFrameInBacktrace(Backtrace* backtrace, uintptr_t test_func, size_t* frame_num) {
1285 backtrace_map_t map;
1286 backtrace->FillInMap(test_func, &map);
1287 if (!BacktraceMap::IsValid(map)) {
1288 return false;
1289 }
1290
1291 // Loop through the frames, and find the one that is in the map.
1292 *frame_num = 0;
1293 for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
1294 if (BacktraceMap::IsValid(it->map) && map.start == it->map.start &&
1295 it->pc >= test_func) {
1296 *frame_num = it->num;
1297 return true;
1298 }
1299 }
1300 return false;
1301 }
1302
// Sanity-checks the frame found for test_func: the unwind must not have
// hit MAX_BACKTRACE_FRAMES, the frame must be neither first nor last, and
// its pc must be within 200 bytes of test_func.
static void VerifyUnreadableElfFrame(Backtrace* backtrace, uintptr_t test_func, size_t frame_num) {
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
      << DumpFrames(backtrace);

  ASSERT_TRUE(frame_num != 0) << DumpFrames(backtrace);
  // Make sure that there is at least one more frame above the test func call.
  ASSERT_LT(frame_num, backtrace->NumFrames()) << DumpFrames(backtrace);

  uintptr_t diff = backtrace->GetFrame(frame_num)->pc - test_func;
  ASSERT_LT(diff, 200U) << DumpFrames(backtrace);
}
1314
VerifyUnreadableElfBacktrace(uintptr_t test_func)1315 static void VerifyUnreadableElfBacktrace(uintptr_t test_func) {
1316 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
1317 BACKTRACE_CURRENT_THREAD));
1318 ASSERT_TRUE(backtrace.get() != nullptr);
1319 ASSERT_TRUE(backtrace->Unwind(0));
1320 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
1321
1322 size_t frame_num;
1323 ASSERT_TRUE(FindFuncFrameInBacktrace(backtrace.get(), test_func, &frame_num));
1324
1325 VerifyUnreadableElfFrame(backtrace.get(), test_func, frame_num);
1326 }
1327
1328 typedef int (*test_func_t)(int, int, int, int, void (*)(uintptr_t), uintptr_t);
1329
TEST(libbacktrace, unwind_through_unreadable_elf_local) {
  // dlopen a copy of the test library, then unlink it so the file is no
  // longer reachable by name while the code stays mapped.
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  // The callback unwinds from deep inside the library's call chain.
  ASSERT_NE(test_func(1, 2, 3, 4, VerifyUnreadableElfBacktrace,
                      reinterpret_cast<uintptr_t>(test_func)), 0);

  ASSERT_TRUE(dlclose(lib_handle) == 0);
}
1346
// Forks a child that calls into the test library, dlcloses the library in
// the parent, and verifies a remote unwind of the child can still produce
// a frame for test_func (the child's copy of the mapping survives the
// parent's dlclose). Gives up after 5 seconds.
TEST(libbacktrace, unwind_through_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  pid_t pid;
  if ((pid = fork()) == 0) {
    test_func(1, 2, 3, 4, 0, 0);
    exit(0);
  }
  ASSERT_TRUE(pid > 0);
  ASSERT_TRUE(dlclose(lib_handle) == 0);

  uint64_t start = NanoTime();
  bool done = false;
  while (!done) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

    size_t frame_num;
    if (FindFuncFrameInBacktrace(backtrace.get(),
                                 reinterpret_cast<uintptr_t>(test_func), &frame_num)) {

      VerifyUnreadableElfFrame(backtrace.get(), reinterpret_cast<uintptr_t>(test_func), frame_num);
      done = true;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(done) << "Test function never found in unwind.";
}
1400
TEST(libbacktrace, unwind_thread_doesnt_exist) {
  // Tid 99999999 should never exist; Unwind must fail with the right error.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 99999999));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_FALSE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_THREAD_DOESNT_EXIST, backtrace->GetError());
}
1408
TEST(libbacktrace, local_get_function_name_before_unwind) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // GetFunctionName must work even when no Unwind() has been done yet.
  uintptr_t func_addr = reinterpret_cast<uintptr_t>(&test_level_one) + 1;
  size_t unused_offset;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(func_addr, &unused_offset));
}
1419
TEST(libbacktrace, remote_get_function_name_before_unwind) {
  pid_t remote_pid;
  CreateRemoteProcess(&remote_pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(remote_pid, remote_pid));

  // GetFunctionName must work even when no Unwind() has been done yet.
  uintptr_t func_addr = reinterpret_cast<uintptr_t>(&test_level_one) + 1;
  size_t unused_offset;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(func_addr, &unused_offset));

  FinishRemoteProcess(remote_pid);
}
1434
// Writes the stack-pointer value into the architecture-specific slot of
// the given ucontext. Fails the test on unsupported architectures.
static void SetUcontextSp(uintptr_t sp, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_sp = sp;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.sp = sp;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_ESP] = sp;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RSP] = sp;
#else
  UNUSED(sp);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1450
// Writes the program-counter value into the architecture-specific slot of
// the given ucontext. Fails the test on unsupported architectures.
static void SetUcontextPc(uintptr_t pc, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_pc = pc;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.pc = pc;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_EIP] = pc;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RIP] = pc;
#else
  UNUSED(pc);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1466
// Writes the link-register (return address) value into the given ucontext.
// x86/x86-64 have no link register (the return address lives on the
// stack), so there only the arguments are sanity-checked.
static void SetUcontextLr(uintptr_t lr, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_lr = lr;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.regs[30] = lr;
#elif defined(__i386__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#elif defined(__x86_64__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#else
  UNUSED(lr);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1486
1487 static constexpr size_t DEVICE_MAP_SIZE = 1024;
1488
SetupDeviceMap(void ** device_map)1489 static void SetupDeviceMap(void** device_map) {
1490 // Make sure that anything in a device map will result in fails
1491 // to read.
1492 android::base::unique_fd device_fd(open("/dev/zero", O_RDONLY | O_CLOEXEC));
1493
1494 *device_map = mmap(nullptr, 1024, PROT_READ, MAP_PRIVATE, device_fd, 0);
1495 ASSERT_TRUE(*device_map != MAP_FAILED);
1496
1497 // Make sure the map is readable.
1498 ASSERT_EQ(0, reinterpret_cast<int*>(*device_map)[0]);
1499 }
1500
// Verifies that addresses inside a device map are rejected: function-name
// lookups return empty strings, and an unwind whose pc or sp falls in the
// device map stops after the first frame.
static void UnwindFromDevice(Backtrace* backtrace, void* device_map) {
  uintptr_t device_map_uint = reinterpret_cast<uintptr_t>(device_map);

  backtrace_map_t map;
  backtrace->FillInMap(device_map_uint, &map);
  // Verify the flag is set.
  ASSERT_EQ(PROT_DEVICE_MAP, map.flags & PROT_DEVICE_MAP);

  // Quick sanity checks.
  size_t offset;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset, &map));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(0, &offset));

  uintptr_t cur_func_offset = reinterpret_cast<uintptr_t>(&test_level_one) + 1;
  // Now verify the device map flag actually causes the function name to be empty.
  backtrace->FillInMap(cur_func_offset, &map);
  ASSERT_TRUE((map.flags & PROT_DEVICE_MAP) == 0);
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));
  // Forcing the flag onto an otherwise normal map must suppress the name.
  map.flags |= PROT_DEVICE_MAP;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));

  ucontext_t ucontext;

  // Create a context that has the pc in the device map, but the sp
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(reinterpret_cast<uintptr_t>(&ucontext), &ucontext);
  SetUcontextPc(device_map_uint, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The buffer should only be a single element.
  ASSERT_EQ(1U, backtrace->NumFrames());
  const backtrace_frame_data_t* frame = backtrace->GetFrame(0);
  ASSERT_EQ(device_map_uint, frame->pc);
  ASSERT_EQ(reinterpret_cast<uintptr_t>(&ucontext), frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());

  // Create a context that has the sp in the device map, but the pc
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(device_map_uint, &ucontext);
  SetUcontextPc(cur_func_offset, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The buffer should only be a single element.
  ASSERT_EQ(1U, backtrace->NumFrames());
  frame = backtrace->GetFrame(0);
  ASSERT_EQ(cur_func_offset, frame->pc);
  ASSERT_EQ(device_map_uint, frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());
}
1563
TEST(libbacktrace, unwind_disallow_device_map_local) {
  // Map a device region, then verify local unwinds refuse to use it.
  void* dev_map;
  SetupDeviceMap(&dev_map);

  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace);

  UnwindFromDevice(backtrace.get(), dev_map);

  munmap(dev_map, DEVICE_MAP_SIZE);
}
1577
// Remote variant of the device-map test. The actual device-map unwind is
// currently disabled (see TODO below); only setup/teardown runs today.
TEST(libbacktrace, unwind_disallow_device_map_remote) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Fork a process to do a remote backtrace.
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

  // TODO: Currently unwind from context doesn't work on remote unwind.
  // Keep this test because the new unwinder should support this eventually,
  // at which point the call below can be re-enabled (or this test deleted).
  // UnwindFromDevice(backtrace.get(), device_map);

  FinishRemoteProcess(pid);

  munmap(device_map, DEVICE_MAP_SIZE);
}
1599
// RAII helper that installs a signal handler (sa_handler form) or a
// sigaction (SA_SIGINFO form) on construction and restores the previous
// disposition on destruction.
class ScopedSignalHandler {
 public:
  ScopedSignalHandler(int signal_number, void (*handler)(int)) : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_handler = handler;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ScopedSignalHandler(int signal_number, void (*action)(int, siginfo_t*, void*))
      : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_flags = SA_SIGINFO;
    action_.sa_sigaction = action;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ~ScopedSignalHandler() { sigaction(signal_number_, &old_action_, nullptr); }

  // Copying would restore the saved disposition twice; forbid it.
  ScopedSignalHandler(const ScopedSignalHandler&) = delete;
  ScopedSignalHandler& operator=(const ScopedSignalHandler&) = delete;

 private:
  struct sigaction action_;
  struct sigaction old_action_;
  const int signal_number_;
};
1623
// Stores 1 through `data`, then spins forever so the remote process stays
// inside this frame; passed as the callback into the test library.
static void SetValueAndLoop(void* data) {
  volatile int* flag = reinterpret_cast<volatile int*>(data);
  *flag = 1;

  // The volatile counter keeps the busy loop from being optimized away.
  volatile int spin = 0;
  while (true) {
    spin++;
  }
}
1631
// End-to-end remote unwind through a signal frame:
//   1. Fork a child that runs test_level_one -> ... -> SetValueAndLoop.
//   2. Poll (via ptrace reads of `value`, which has the same address in
//      the child thanks to fork) until the child is spinning.
//   3. Send SIGUSR1; the handler/action installed in the child ends up
//      spinning in test_loop_forever.
//   4. Unwind and verify the frames above the signal frame form the
//      expected test_level_* call chain.
static void UnwindThroughSignal(bool use_action) {
  volatile int value = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    if (use_action) {
      ScopedSignalHandler ssh(SIGUSR1, test_signal_action);

      test_level_one(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    } else {
      ScopedSignalHandler ssh(SIGUSR1, test_signal_handler);

      test_level_one(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    }
  }
  ASSERT_NE(-1, pid);

  int read_value = 0;
  uint64_t start = NanoTime();
  while (read_value == 0) {
    usleep(1000);

    // Loop until the remote function gets into the final function.
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

    size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(const_cast<int*>(&value)),
                                        reinterpret_cast<uint8_t*>(&read_value), sizeof(read_value));
    ASSERT_EQ(sizeof(read_value), bytes_read);

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not execute far enough in 5 seconds.";
  }

  // Now need to send a signal to the remote process.
  kill(pid, SIGUSR1);

  // Wait for the process to get to the signal handler loop.
  Backtrace::const_iterator frame_iter;
  start = NanoTime();
  std::unique_ptr<Backtrace> backtrace;
  while (true) {
    usleep(1000);

    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    backtrace.reset(Backtrace::Create(pid, pid));
    ASSERT_TRUE(backtrace->Unwind(0));
    bool found = false;
    for (frame_iter = backtrace->begin(); frame_iter != backtrace->end(); ++frame_iter) {
      if (frame_iter->func_name == "test_loop_forever") {
        // Leave frame_iter pointing at the frame above test_loop_forever.
        ++frame_iter;
        found = true;
        break;
      }
    }
    if (found) {
      break;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not get in signal handler in 5 seconds." << std::endl
        << DumpFrames(backtrace.get());
  }

  std::vector<std::string> names;
  // Loop through the frames, and save the function names.
  size_t frame = 0;
  for (; frame_iter != backtrace->end(); ++frame_iter) {
    if (frame_iter->func_name == "test_level_four") {
      // Remember the index of the frame just after test_level_four.
      frame = names.size() + 1;
    }
    names.push_back(frame_iter->func_name);
  }
  ASSERT_NE(0U, frame) << "Unable to find test_level_four in backtrace" << std::endl
                       << DumpFrames(backtrace.get());

  // The expected order of the frames:
  // test_loop_forever
  // test_signal_handler|test_signal_action
  // <OPTIONAL_FRAME> May or may not exist.
  // SetValueAndLoop (but the function name might be empty)
  // test_level_four
  // test_level_three
  // test_level_two
  // test_level_one
  ASSERT_LE(frame + 2, names.size()) << DumpFrames(backtrace.get());
  ASSERT_LE(2U, frame) << DumpFrames(backtrace.get());
  if (use_action) {
    ASSERT_EQ("test_signal_action", names[0]) << DumpFrames(backtrace.get());
  } else {
    ASSERT_EQ("test_signal_handler", names[0]) << DumpFrames(backtrace.get());
  }
  ASSERT_EQ("test_level_three", names[frame]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_two", names[frame + 1]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_one", names[frame + 2]) << DumpFrames(backtrace.get());

  FinishRemoteProcess(pid);
}
1739
TEST(libbacktrace,unwind_remote_through_signal_using_handler)1740 TEST(libbacktrace, unwind_remote_through_signal_using_handler) { UnwindThroughSignal(false); }
1741
TEST(libbacktrace,unwind_remote_through_signal_using_action)1742 TEST(libbacktrace, unwind_remote_through_signal_using_action) { UnwindThroughSignal(true); }
1743
1744 #if defined(ENABLE_PSS_TESTS)
1745 #include "GetPss.h"
1746
1747 #define MAX_LEAK_BYTES (32*1024UL)
1748
CheckForLeak(pid_t pid,pid_t tid)1749 static void CheckForLeak(pid_t pid, pid_t tid) {
1750 // Do a few runs to get the PSS stable.
1751 for (size_t i = 0; i < 100; i++) {
1752 Backtrace* backtrace = Backtrace::Create(pid, tid);
1753 ASSERT_TRUE(backtrace != nullptr);
1754 ASSERT_TRUE(backtrace->Unwind(0));
1755 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
1756 delete backtrace;
1757 }
1758 size_t stable_pss = GetPssBytes();
1759 ASSERT_TRUE(stable_pss != 0);
1760
1761 // Loop enough that even a small leak should be detectable.
1762 for (size_t i = 0; i < 4096; i++) {
1763 Backtrace* backtrace = Backtrace::Create(pid, tid);
1764 ASSERT_TRUE(backtrace != nullptr);
1765 ASSERT_TRUE(backtrace->Unwind(0));
1766 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
1767 delete backtrace;
1768 }
1769 size_t new_pss = GetPssBytes();
1770 ASSERT_TRUE(new_pss != 0);
1771 if (new_pss > stable_pss) {
1772 ASSERT_LE(new_pss - stable_pss, MAX_LEAK_BYTES);
1773 }
1774 }
1775
// Checks that repeated local unwinds of the current thread do not leak.
TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}
1779
TEST(libbacktrace, check_for_leak_local_thread) {
  // Checks that repeated unwinds of another thread in this process do not leak.
  thread_t shared = { 0, 0, 0, nullptr };
  pthread_t runner;
  ASSERT_TRUE(pthread_create(&runner, nullptr, ThreadLevelRun, &shared) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&shared.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, shared.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &shared.state);

  ASSERT_TRUE(pthread_join(runner, nullptr) == 0);
}
1795
TEST(libbacktrace, check_for_leak_remote) {
  // Checks that repeated remote unwinds of a stopped child do not leak.
  pid_t remote_pid;
  CreateRemoteProcess(&remote_pid);

  CheckForLeak(remote_pid, BACKTRACE_CURRENT_THREAD);

  FinishRemoteProcess(remote_pid);
}
1804 #endif
1805