1 /*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define _GNU_SOURCE 1
18 #include <dirent.h>
19 #include <dlfcn.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <malloc.h>
24 #include <pthread.h>
25 #include <signal.h>
26 #include <stdint.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/ptrace.h>
31 #include <sys/stat.h>
32 #include <sys/types.h>
33 #include <sys/wait.h>
34 #include <time.h>
35 #include <ucontext.h>
36 #include <unistd.h>
37
38 #include <algorithm>
39 #include <list>
40 #include <memory>
41 #include <ostream>
42 #include <string>
43 #include <vector>
44
45 #include <backtrace/Backtrace.h>
46 #include <backtrace/BacktraceMap.h>
47
48 #include <android-base/macros.h>
49 #include <android-base/stringprintf.h>
50 #include <android-base/test_utils.h>
51 #include <android-base/threads.h>
52 #include <android-base/unique_fd.h>
53 #include <cutils/atomic.h>
54
55 #include <gtest/gtest.h>
56
57 // For the THREAD_SIGNAL definition.
58 #include "BacktraceCurrent.h"
59 #include "BacktraceTest.h"
60 #include "backtrace_testlib.h"
61
// Number of microseconds per millisecond.
63 #define US_PER_MSEC 1000
64
65 // Number of nanoseconds in a second.
66 #define NS_PER_SEC 1000000000ULL
67
68 // Number of simultaneous dumping operations to perform.
69 #define NUM_THREADS 40
70
71 // Number of simultaneous threads running in our forked process.
72 #define NUM_PTRACE_THREADS 5
73
// The list of shared libraries that make up the backtrace library.
75 static std::vector<std::string> kBacktraceLibs{"libunwindstack.so", "libbacktrace.so"};
76
77 struct thread_t {
78 pid_t tid;
79 int32_t state;
80 pthread_t threadId;
81 void* data;
82 };
83
84 struct dump_thread_t {
85 thread_t thread;
86 BacktraceMap* map;
87 Backtrace* backtrace;
88 int32_t* now;
89 int32_t done;
90 };
91
92 typedef Backtrace* (*create_func_t)(pid_t, pid_t, BacktraceMap*);
93 typedef BacktraceMap* (*map_create_func_t)(pid_t, bool);
94
95 static void VerifyLevelDump(Backtrace* backtrace, create_func_t create_func = nullptr,
96 map_create_func_t map_func = nullptr);
97 static void VerifyMaxDump(Backtrace* backtrace, create_func_t create_func = nullptr,
98 map_create_func_t map_func = nullptr);
99
100 void* BacktraceTest::dl_handle_;
101 int (*BacktraceTest::test_level_one_)(int, int, int, int, void (*)(void*), void*);
102 int (*BacktraceTest::test_level_two_)(int, int, int, int, void (*)(void*), void*);
103 int (*BacktraceTest::test_level_three_)(int, int, int, int, void (*)(void*), void*);
104 int (*BacktraceTest::test_level_four_)(int, int, int, int, void (*)(void*), void*);
105 int (*BacktraceTest::test_recursive_call_)(int, void (*)(void*), void*);
106 void (*BacktraceTest::test_get_context_and_wait_)(void*, volatile int*);
107 void (*BacktraceTest::test_signal_action_)(int, siginfo_t*, void*);
108 void (*BacktraceTest::test_signal_handler_)(int);
109
extern "C" bool GetInitialArgs(const char*** args, size_t* num_args) {
111 static const char* initial_args[] = {"--slow_threshold_ms=8000", "--deadline_threshold_ms=15000"};
112 *args = initial_args;
113 *num_args = 2;
114 return true;
115 }
116
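// Returns the current CLOCK_MONOTONIC time in nanoseconds.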
static uint64_t NanoTime() {
118 struct timespec t = { 0, 0 };
119 clock_gettime(CLOCK_MONOTONIC, &t);
120 return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
121 }
122
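// Formats every frame in the backtrace into a single string for use in failure messages.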
static std::string DumpFrames(Backtrace* backtrace) {
124 if (backtrace->NumFrames() == 0) {
125 return " No frames to dump.\n";
126 }
127
128 std::string frame;
129 for (size_t i = 0; i < backtrace->NumFrames(); i++) {
130 frame += " " + backtrace->FormatFrameData(i) + '\n';
131 }
132 return frame;
133 }
134
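// Polls PTRACE_GETSIGINFO until the traced process reaches a stopping point, giving up after one second.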
static void WaitForStop(pid_t pid) {
136 uint64_t start = NanoTime();
137
138 siginfo_t si;
139 while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
140 if ((NanoTime() - start) > NS_PER_SEC) {
141 printf("The process did not get to a stopping point in 1 second.\n");
142 break;
143 }
144 usleep(US_PER_MSEC);
145 }
146 }
147
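// Forks a child that spins forever, attaches to it with ptrace, and waits for it to stop.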
static void CreateRemoteProcess(pid_t* pid) {
149 if ((*pid = fork()) == 0) {
150 while (true)
151 ;
152 _exit(0);
153 }
154 ASSERT_NE(-1, *pid);
155
156 ASSERT_TRUE(ptrace(PTRACE_ATTACH, *pid, 0, 0) == 0);
157
158 // Wait for the process to get to a stopping point.
159 WaitForStop(*pid);
160 }
161
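// Detaches from, kills, and reaps the remote process.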
static void FinishRemoteProcess(pid_t pid) {
163 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
164
165 kill(pid, SIGKILL);
166 ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
167 }
168
169 #if !defined(__ANDROID__) || defined(__arm__)
// On the host and on arm targets we aren't guaranteed that the unwind will terminate cleanly.
171 #define VERIFY_NO_ERROR(error_code) \
172 ASSERT_TRUE(error_code == BACKTRACE_UNWIND_NO_ERROR || \
173 error_code == BACKTRACE_UNWIND_ERROR_UNWIND_INFO || \
174 error_code == BACKTRACE_UNWIND_ERROR_MAP_MISSING) \
175 << "Unknown error code " << std::to_string(error_code);
176 #else
177 #define VERIFY_NO_ERROR(error_code) ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, error_code);
178 #endif
179
static bool ReadyLevelBacktrace(Backtrace* backtrace) {
181 // See if test_level_four is in the backtrace.
182 bool found = false;
183 for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
184 if (it->func_name == "test_level_four") {
185 found = true;
186 break;
187 }
188 }
189
190 return found;
191 }
192
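// Checks that the test_level_one through test_level_four frames appear in the backtrace in the expected order.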
static void VerifyLevelDump(Backtrace* backtrace, create_func_t, map_create_func_t) {
194 ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0))
195 << DumpFrames(backtrace);
196 ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
197 << DumpFrames(backtrace);
198
199 // Look through the frames starting at the highest to find the
200 // frame we want.
201 size_t frame_num = 0;
202 for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
203 if (backtrace->GetFrame(i)->func_name == "test_level_one") {
204 frame_num = i;
205 break;
206 }
207 }
208 ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
209 ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);
210
211 ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one")
212 << DumpFrames(backtrace);
213 ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two")
214 << DumpFrames(backtrace);
215 ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three")
216 << DumpFrames(backtrace);
217 ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four")
218 << DumpFrames(backtrace);
219 }
220
static void VerifyLevelBacktrace(void*) {
222 std::unique_ptr<Backtrace> backtrace(
223 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
224 ASSERT_TRUE(backtrace.get() != nullptr);
225 ASSERT_TRUE(backtrace->Unwind(0));
226 VERIFY_NO_ERROR(backtrace->GetError().error_code);
227
228 VerifyLevelDump(backtrace.get());
229 }
230
static bool ReadyMaxBacktrace(Backtrace* backtrace) {
232 return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
233 }
234
static void VerifyMaxDump(Backtrace* backtrace, create_func_t, map_create_func_t) {
236 ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
237 << DumpFrames(backtrace);
238 // Verify that the last frame is our recursive call.
239 ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name, "test_recursive_call")
240 << DumpFrames(backtrace);
241 }
242
static void VerifyMaxBacktrace(void*) {
244 std::unique_ptr<Backtrace> backtrace(
245 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
246 ASSERT_TRUE(backtrace.get() != nullptr);
247 ASSERT_TRUE(backtrace->Unwind(0));
248 ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);
249
250 VerifyMaxDump(backtrace.get());
251 }
252
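// Callback run by the runner threads: marks the thread as ready, then spins until the state flag is cleared.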
static void ThreadSetState(void* data) {
254 thread_t* thread = reinterpret_cast<thread_t*>(data);
255 android_atomic_acquire_store(1, &thread->state);
256 volatile int i = 0;
257 while (thread->state) {
258 i++;
259 }
260 }
261
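// Busy-waits until *value becomes non-zero or the timeout (in seconds) expires.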
static bool WaitForNonZero(int32_t* value, uint64_t seconds) {
263 uint64_t start = NanoTime();
264 do {
265 if (android_atomic_acquire_load(value)) {
266 return true;
267 }
268 } while ((NanoTime() - start) < seconds * NS_PER_SEC);
269 return false;
270 }
271
TEST_F(BacktraceTest, local_no_unwind_frames) {
// Verify that a local unwind does not include any frames within
// libunwindstack or libbacktrace.
275 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
276 ASSERT_TRUE(backtrace.get() != nullptr);
277 ASSERT_TRUE(backtrace->Unwind(0));
278 VERIFY_NO_ERROR(backtrace->GetError().error_code);
279
280 ASSERT_TRUE(backtrace->NumFrames() != 0);
281 // None of the frames should be in the backtrace libraries.
282 for (const auto& frame : *backtrace ) {
283 if (BacktraceMap::IsValid(frame.map)) {
284 const std::string name = basename(frame.map.name.c_str());
285 for (const auto& lib : kBacktraceLibs) {
286 ASSERT_TRUE(name != lib) << DumpFrames(backtrace.get());
287 }
288 }
289 }
290 }
291
TEST_F(BacktraceTest, local_unwind_frames) {
293 // Verify that a local unwind with the skip frames disabled does include
294 // frames within the backtrace libraries.
295 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
296 ASSERT_TRUE(backtrace.get() != nullptr);
297 backtrace->SetSkipFrames(false);
298 ASSERT_TRUE(backtrace->Unwind(0));
299 VERIFY_NO_ERROR(backtrace->GetError().error_code);
300
301 ASSERT_TRUE(backtrace->NumFrames() != 0);
302 size_t first_frame_non_backtrace_lib = 0;
303 for (const auto& frame : *backtrace) {
304 if (BacktraceMap::IsValid(frame.map)) {
305 const std::string name = basename(frame.map.name.c_str());
306 bool found = false;
307 for (const auto& lib : kBacktraceLibs) {
308 if (name == lib) {
309 found = true;
310 break;
311 }
312 }
313 if (!found) {
314 first_frame_non_backtrace_lib = frame.num;
315 break;
316 }
317 }
318 }
319
320 ASSERT_NE(0U, first_frame_non_backtrace_lib) << "No frames found in backtrace libraries:\n"
321 << DumpFrames(backtrace.get());
322 }
323
TEST_F(BacktraceTest, local_trace) {
325 ASSERT_NE(test_level_one_(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
326 }
327
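// Checks that the unwinds with one and two ignored frames match the full
// unwind, shifted by one and two frames respectively.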
static void VerifyIgnoreFrames(Backtrace* bt_all, Backtrace* bt_ign1, Backtrace* bt_ign2,
                               const char* cur_proc) {
330 ASSERT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1) << "All backtrace:\n"
331 << DumpFrames(bt_all)
332 << "Ignore 1 backtrace:\n"
333 << DumpFrames(bt_ign1);
334 ASSERT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2) << "All backtrace:\n"
335 << DumpFrames(bt_all)
336 << "Ignore 2 backtrace:\n"
337 << DumpFrames(bt_ign2);
338
// Check that all of the frames above the current function are the same.
340 bool check = (cur_proc == nullptr);
341 for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
342 if (check) {
343 EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
344 EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
345 EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);
346
347 EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
348 EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
349 EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
350 }
351 if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
352 check = true;
353 }
354 }
355 }
356
static void VerifyLevelIgnoreFrames(void*) {
358 std::unique_ptr<Backtrace> all(
359 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
360 ASSERT_TRUE(all.get() != nullptr);
361 ASSERT_TRUE(all->Unwind(0));
362 VERIFY_NO_ERROR(all->GetError().error_code);
363
364 std::unique_ptr<Backtrace> ign1(
365 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
366 ASSERT_TRUE(ign1.get() != nullptr);
367 ASSERT_TRUE(ign1->Unwind(1));
368 VERIFY_NO_ERROR(ign1->GetError().error_code);
369
370 std::unique_ptr<Backtrace> ign2(
371 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
372 ASSERT_TRUE(ign2.get() != nullptr);
373 ASSERT_TRUE(ign2->Unwind(2));
374 VERIFY_NO_ERROR(ign2->GetError().error_code);
375
376 VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
377 }
378
TEST_F(BacktraceTest, local_trace_ignore_frames) {
380 ASSERT_NE(test_level_one_(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
381 }
382
TEST_F(BacktraceTest, local_max_trace) {
384 ASSERT_NE(test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, VerifyMaxBacktrace, nullptr), 0);
385 }
386
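// Repeatedly attaches to the target, unwinds it, and runs VerifyFunc once
// ReadyFunc reports that the expected frames are present. Gives up after
// five seconds.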
static void VerifyProcTest(pid_t pid, pid_t tid, bool (*ReadyFunc)(Backtrace*),
                           void (*VerifyFunc)(Backtrace*, create_func_t, map_create_func_t),
                           create_func_t create_func, map_create_func_t map_create_func) {
390 pid_t ptrace_tid;
391 if (tid < 0) {
392 ptrace_tid = pid;
393 } else {
394 ptrace_tid = tid;
395 }
396 uint64_t start = NanoTime();
397 bool verified = false;
398 std::string last_dump;
399 do {
400 usleep(US_PER_MSEC);
401 if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
402 // Wait for the process to get to a stopping point.
403 WaitForStop(ptrace_tid);
404
405 std::unique_ptr<BacktraceMap> map;
406 map.reset(map_create_func(pid, false));
407 std::unique_ptr<Backtrace> backtrace(create_func(pid, tid, map.get()));
408 ASSERT_TRUE(backtrace.get() != nullptr);
409 ASSERT_TRUE(backtrace->Unwind(0));
410 if (ReadyFunc(backtrace.get())) {
411 VerifyFunc(backtrace.get(), create_func, map_create_func);
412 verified = true;
413 } else {
414 last_dump = DumpFrames(backtrace.get());
415 }
416
417 ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
418 }
419 // If 5 seconds have passed, then we are done.
420 } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
421 ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump;
422 }
423
TEST_F(BacktraceTest, ptrace_trace) {
425 pid_t pid;
426 if ((pid = fork()) == 0) {
427 ASSERT_NE(test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
428 _exit(1);
429 }
430 VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyLevelDump,
431 Backtrace::Create, BacktraceMap::Create);
432
433 kill(pid, SIGKILL);
434 int status;
435 ASSERT_EQ(waitpid(pid, &status, 0), pid);
436 }
437
TEST_F(BacktraceTest, ptrace_max_trace) {
439 pid_t pid;
440 if ((pid = fork()) == 0) {
441 ASSERT_NE(test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, nullptr, nullptr), 0);
442 _exit(1);
443 }
444 VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyMaxBacktrace, VerifyMaxDump, Backtrace::Create,
445 BacktraceMap::Create);
446
447 kill(pid, SIGKILL);
448 int status;
449 ASSERT_EQ(waitpid(pid, &status, 0), pid);
450 }
451
static void VerifyProcessIgnoreFrames(Backtrace* bt_all, create_func_t create_func,
                                      map_create_func_t map_create_func) {
454 std::unique_ptr<BacktraceMap> map(map_create_func(bt_all->Pid(), false));
455 std::unique_ptr<Backtrace> ign1(create_func(bt_all->Pid(), BACKTRACE_CURRENT_THREAD, map.get()));
456 ASSERT_TRUE(ign1.get() != nullptr);
457 ASSERT_TRUE(ign1->Unwind(1));
458 VERIFY_NO_ERROR(ign1->GetError().error_code);
459
460 std::unique_ptr<Backtrace> ign2(create_func(bt_all->Pid(), BACKTRACE_CURRENT_THREAD, map.get()));
461 ASSERT_TRUE(ign2.get() != nullptr);
462 ASSERT_TRUE(ign2->Unwind(2));
463 VERIFY_NO_ERROR(ign2->GetError().error_code);
464
465 VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
466 }
467
TEST_F(BacktraceTest, ptrace_ignore_frames) {
469 pid_t pid;
470 if ((pid = fork()) == 0) {
471 ASSERT_NE(test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
472 _exit(1);
473 }
474 VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyProcessIgnoreFrames,
475 Backtrace::Create, BacktraceMap::Create);
476
477 kill(pid, SIGKILL);
478 int status;
479 ASSERT_EQ(waitpid(pid, &status, 0), pid);
480 }
481
482 // Create a process with multiple threads and dump all of the threads.
static void* PtraceThreadLevelRun(void*) {
484 EXPECT_NE(BacktraceTest::test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
485 return nullptr;
486 }
487
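// Reads /proc/<pid>/task to collect the tids of every thread in the process.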
static void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
489 // Get the list of tasks.
490 char task_path[128];
491 snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);
492
493 std::unique_ptr<DIR, decltype(&closedir)> tasks_dir(opendir(task_path), closedir);
494 ASSERT_TRUE(tasks_dir != nullptr);
495 struct dirent* entry;
496 while ((entry = readdir(tasks_dir.get())) != nullptr) {
497 char* end;
498 pid_t tid = strtoul(entry->d_name, &end, 10);
499 if (*end == '\0') {
500 threads->push_back(tid);
501 }
502 }
503 }
504
TEST_F(BacktraceTest, ptrace_threads) {
506 pid_t pid;
507 if ((pid = fork()) == 0) {
508 for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
509 pthread_attr_t attr;
510 pthread_attr_init(&attr);
511 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
512
513 pthread_t thread;
514 ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
515 }
516 ASSERT_NE(test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
517 _exit(1);
518 }
519
520 // Check to see that all of the threads are running before unwinding.
521 std::vector<pid_t> threads;
522 uint64_t start = NanoTime();
523 do {
524 usleep(US_PER_MSEC);
525 threads.clear();
526 GetThreads(pid, &threads);
527 } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
528 ((NanoTime() - start) <= 5 * NS_PER_SEC));
529 ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));
530
531 ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
532 WaitForStop(pid);
for (std::vector<pid_t>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
// Skip the forked process itself; we only care about its threads.
535 if (pid == *it) {
536 continue;
537 }
538 VerifyProcTest(pid, *it, ReadyLevelBacktrace, VerifyLevelDump, Backtrace::Create,
539 BacktraceMap::Create);
540 }
541
542 FinishRemoteProcess(pid);
543 }
544
void VerifyLevelThread(void*) {
546 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), android::base::GetThreadId()));
547 ASSERT_TRUE(backtrace.get() != nullptr);
548 ASSERT_TRUE(backtrace->Unwind(0));
549 VERIFY_NO_ERROR(backtrace->GetError().error_code);
550
551 VerifyLevelDump(backtrace.get());
552 }
553
TEST_F(BacktraceTest, thread_current_level) {
555 ASSERT_NE(test_level_one_(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
556 }
557
static void VerifyMaxThread(void*) {
559 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), android::base::GetThreadId()));
560 ASSERT_TRUE(backtrace.get() != nullptr);
561 ASSERT_TRUE(backtrace->Unwind(0));
562 ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);
563
564 VerifyMaxDump(backtrace.get());
565 }
566
TEST_F(BacktraceTest, thread_current_max) {
568 ASSERT_NE(test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, VerifyMaxThread, nullptr), 0);
569 }
570
static void* ThreadLevelRun(void* data) {
572 thread_t* thread = reinterpret_cast<thread_t*>(data);
573
574 thread->tid = android::base::GetThreadId();
575 EXPECT_NE(BacktraceTest::test_level_one_(1, 2, 3, 4, ThreadSetState, data), 0);
576 return nullptr;
577 }
578
TEST_F(BacktraceTest, thread_level_trace) {
580 pthread_attr_t attr;
581 pthread_attr_init(&attr);
582 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
583
584 thread_t thread_data = { 0, 0, 0, nullptr };
585 pthread_t thread;
586 ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);
587
588 // Wait up to 2 seconds for the tid to be set.
589 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));
590
591 // Make sure that the thread signal used is not visible when compiled for
592 // the target.
593 #if !defined(__GLIBC__)
594 ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
595 #endif
596
597 // Save the current signal action and make sure it is restored afterwards.
598 struct sigaction cur_action;
599 ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);
600
601 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
602 ASSERT_TRUE(backtrace.get() != nullptr);
603 ASSERT_TRUE(backtrace->Unwind(0));
604 VERIFY_NO_ERROR(backtrace->GetError().error_code);
605
606 VerifyLevelDump(backtrace.get());
607
608 // Tell the thread to exit its infinite loop.
609 android_atomic_acquire_store(0, &thread_data.state);
610
611 // Verify that the old action was restored.
612 struct sigaction new_action;
613 ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
614 EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
615 // The SA_RESTORER flag gets set behind our back, so a direct comparison
616 // doesn't work unless we mask the value off. Mips doesn't have this
617 // flag, so skip this on that platform.
618 #if defined(SA_RESTORER)
619 cur_action.sa_flags &= ~SA_RESTORER;
620 new_action.sa_flags &= ~SA_RESTORER;
621 #elif defined(__GLIBC__)
622 // Our host compiler doesn't appear to define this flag for some reason.
623 cur_action.sa_flags &= ~0x04000000;
624 new_action.sa_flags &= ~0x04000000;
625 #endif
626 EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
627 }
628
TEST_F(BacktraceTest, thread_ignore_frames) {
630 pthread_attr_t attr;
631 pthread_attr_init(&attr);
632 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
633
634 thread_t thread_data = { 0, 0, 0, nullptr };
635 pthread_t thread;
636 ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);
637
638 // Wait up to 2 seconds for the tid to be set.
639 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));
640
641 std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
642 ASSERT_TRUE(all.get() != nullptr);
643 ASSERT_TRUE(all->Unwind(0));
644 VERIFY_NO_ERROR(all->GetError().error_code);
645
646 std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
647 ASSERT_TRUE(ign1.get() != nullptr);
648 ASSERT_TRUE(ign1->Unwind(1));
649 VERIFY_NO_ERROR(ign1->GetError().error_code);
650
651 std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
652 ASSERT_TRUE(ign2.get() != nullptr);
653 ASSERT_TRUE(ign2->Unwind(2));
654 VERIFY_NO_ERROR(ign2->GetError().error_code);
655
656 VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);
657
658 // Tell the thread to exit its infinite loop.
659 android_atomic_acquire_store(0, &thread_data.state);
660 }
661
static void* ThreadMaxRun(void* data) {
663 thread_t* thread = reinterpret_cast<thread_t*>(data);
664
665 thread->tid = android::base::GetThreadId();
666 EXPECT_NE(BacktraceTest::test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, ThreadSetState, data),
667 0);
668 return nullptr;
669 }
670
TEST_F(BacktraceTest, thread_max_trace) {
672 pthread_attr_t attr;
673 pthread_attr_init(&attr);
674 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
675
676 thread_t thread_data = { 0, 0, 0, nullptr };
677 pthread_t thread;
678 ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);
679
680 // Wait for the tid to be set.
681 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));
682
683 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
684 ASSERT_TRUE(backtrace.get() != nullptr);
685 ASSERT_TRUE(backtrace->Unwind(0));
686 ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);
687
688 VerifyMaxDump(backtrace.get());
689
690 // Tell the thread to exit its infinite loop.
691 android_atomic_acquire_store(0, &thread_data.state);
692 }
693
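// Dumper thread: spins until signalled to start, unwinds its target thread, then marks itself done.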
static void* ThreadDump(void* data) {
695 dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
696 while (true) {
697 if (android_atomic_acquire_load(dump->now)) {
698 break;
699 }
700 }
701
702 // The status of the actual unwind will be checked elsewhere.
703 dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid, dump->map);
704 dump->backtrace->Unwind(0);
705
706 android_atomic_acquire_store(1, &dump->done);
707
708 return nullptr;
709 }
710
static void MultipleThreadDumpTest(bool share_map) {
// Dump NUM_THREADS simultaneously, optionally sharing a single map.
713 std::vector<thread_t> runners(NUM_THREADS);
714 std::vector<dump_thread_t> dumpers(NUM_THREADS);
715
716 pthread_attr_t attr;
717 pthread_attr_init(&attr);
718 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
719 for (size_t i = 0; i < NUM_THREADS; i++) {
720 // Launch the runners, they will spin in hard loops doing nothing.
721 runners[i].tid = 0;
722 runners[i].state = 0;
723 ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
724 }
725
726 // Wait for tids to be set.
727 for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
728 ASSERT_TRUE(WaitForNonZero(&it->state, 30));
729 }
730
731 // Start all of the dumpers at once, they will spin until they are signalled
732 // to begin their dump run.
733 std::unique_ptr<BacktraceMap> map;
734 if (share_map) {
735 map.reset(BacktraceMap::Create(getpid()));
736 }
737 int32_t dump_now = 0;
738 for (size_t i = 0; i < NUM_THREADS; i++) {
739 dumpers[i].thread.tid = runners[i].tid;
740 dumpers[i].thread.state = 0;
741 dumpers[i].done = 0;
742 dumpers[i].now = &dump_now;
743 dumpers[i].map = map.get();
744
745 ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
746 }
747
748 // Start all of the dumpers going at once.
749 android_atomic_acquire_store(1, &dump_now);
750
751 for (size_t i = 0; i < NUM_THREADS; i++) {
752 ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));
753
754 // Tell the runner thread to exit its infinite loop.
755 android_atomic_acquire_store(0, &runners[i].state);
756
757 ASSERT_TRUE(dumpers[i].backtrace != nullptr);
758 VerifyMaxDump(dumpers[i].backtrace);
759
760 delete dumpers[i].backtrace;
761 dumpers[i].backtrace = nullptr;
762 }
763 }
764
TEST_F(BacktraceTest, thread_multiple_dump) {
766 MultipleThreadDumpTest(false);
767 }
768
TEST_F(BacktraceTest, thread_multiple_dump_same_map) {
770 MultipleThreadDumpTest(true);
771 }
772
773 // This test is for UnwindMaps that should share the same map cursor when
774 // multiple maps are created for the current process at the same time.
TEST_F(BacktraceTest, simultaneous_maps) {
776 BacktraceMap* map1 = BacktraceMap::Create(getpid());
777 BacktraceMap* map2 = BacktraceMap::Create(getpid());
778 BacktraceMap* map3 = BacktraceMap::Create(getpid());
779
780 Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1);
781 ASSERT_TRUE(back1 != nullptr);
782 EXPECT_TRUE(back1->Unwind(0));
783 VERIFY_NO_ERROR(back1->GetError().error_code);
784 delete back1;
785 delete map1;
786
787 Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2);
788 ASSERT_TRUE(back2 != nullptr);
789 EXPECT_TRUE(back2->Unwind(0));
790 VERIFY_NO_ERROR(back2->GetError().error_code);
791 delete back2;
792 delete map2;
793
794 Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3);
795 ASSERT_TRUE(back3 != nullptr);
796 EXPECT_TRUE(back3->Unwind(0));
797 VERIFY_NO_ERROR(back3->GetError().error_code);
798 delete back3;
799 delete map3;
800 }
801
TEST_F(BacktraceTest, fillin_erases) {
803 BacktraceMap* back_map = BacktraceMap::Create(getpid());
804
805 backtrace_map_t map;
806
807 map.start = 1;
808 map.end = 3;
809 map.flags = 1;
810 map.name = "Initialized";
811 back_map->FillIn(0, &map);
812 delete back_map;
813
814 ASSERT_FALSE(BacktraceMap::IsValid(map));
815 ASSERT_EQ(static_cast<uint64_t>(0), map.start);
816 ASSERT_EQ(static_cast<uint64_t>(0), map.end);
817 ASSERT_EQ(0, map.flags);
818 ASSERT_EQ("", map.name);
819 }
820
TEST_F(BacktraceTest, format_test) {
822 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
823 ASSERT_TRUE(backtrace.get() != nullptr);
824
825 backtrace_frame_data_t frame;
826 frame.num = 1;
827 frame.pc = 2;
828 frame.rel_pc = 2;
829 frame.sp = 0;
830 frame.stack_size = 0;
831 frame.func_offset = 0;
832
833 // Check no map set.
834 frame.num = 1;
835 #if defined(__LP64__)
836 EXPECT_EQ("#01 pc 0000000000000002 <unknown>",
837 #else
838 EXPECT_EQ("#01 pc 00000002 <unknown>",
839 #endif
840 backtrace->FormatFrameData(&frame));
841
// Check a map that exists but whose name is empty.
843 frame.pc = 0xb0020;
844 frame.rel_pc = 0x20;
845 frame.map.start = 0xb0000;
846 frame.map.end = 0xbffff;
847 frame.map.load_bias = 0;
848 #if defined(__LP64__)
849 EXPECT_EQ("#01 pc 0000000000000020 <anonymous:00000000000b0000>",
850 #else
851 EXPECT_EQ("#01 pc 00000020 <anonymous:000b0000>",
852 #endif
853 backtrace->FormatFrameData(&frame));
854
855 // Check map name begins with a [.
856 frame.pc = 0xc0020;
857 frame.map.start = 0xc0000;
858 frame.map.end = 0xcffff;
859 frame.map.load_bias = 0;
860 frame.map.name = "[anon:thread signal stack]";
861 #if defined(__LP64__)
862 EXPECT_EQ("#01 pc 0000000000000020 [anon:thread signal stack:00000000000c0000]",
863 #else
864 EXPECT_EQ("#01 pc 00000020 [anon:thread signal stack:000c0000]",
865 #endif
866 backtrace->FormatFrameData(&frame));
867
868 // Check relative pc is set and map name is set.
869 frame.pc = 0x12345679;
870 frame.rel_pc = 0x12345678;
871 frame.map.name = "MapFake";
872 frame.map.start = 1;
873 frame.map.end = 1;
874 #if defined(__LP64__)
875 EXPECT_EQ("#01 pc 0000000012345678 MapFake",
876 #else
877 EXPECT_EQ("#01 pc 12345678 MapFake",
878 #endif
879 backtrace->FormatFrameData(&frame));
880
881 // Check func_name is set, but no func offset.
882 frame.func_name = "ProcFake";
883 #if defined(__LP64__)
884 EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake)",
885 #else
886 EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake)",
887 #endif
888 backtrace->FormatFrameData(&frame));
889
890 // Check func_name is set, and func offset is non-zero.
891 frame.func_offset = 645;
892 #if defined(__LP64__)
893 EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake+645)",
894 #else
895 EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake+645)",
896 #endif
897 backtrace->FormatFrameData(&frame));
898
899 // Check func_name is set, func offset is non-zero, and load_bias is non-zero.
900 frame.rel_pc = 0x123456dc;
901 frame.func_offset = 645;
902 frame.map.load_bias = 100;
903 #if defined(__LP64__)
904 EXPECT_EQ("#01 pc 00000000123456dc MapFake (ProcFake+645)",
905 #else
906 EXPECT_EQ("#01 pc 123456dc MapFake (ProcFake+645)",
907 #endif
908 backtrace->FormatFrameData(&frame));
909
910 // Check a non-zero map offset.
911 frame.map.offset = 0x1000;
912 #if defined(__LP64__)
913 EXPECT_EQ("#01 pc 00000000123456dc MapFake (offset 0x1000) (ProcFake+645)",
914 #else
915 EXPECT_EQ("#01 pc 123456dc MapFake (offset 0x1000) (ProcFake+645)",
916 #endif
917 backtrace->FormatFrameData(&frame));
918 }
919
920 struct map_test_t {
921 uint64_t start;
922 uint64_t end;
923 };
924
static bool map_sort(map_test_t i, map_test_t j) { return i.start < j.start; }
926
static std::string GetTestMapsAsString(const std::vector<map_test_t>& maps) {
928 if (maps.size() == 0) {
929 return "No test map entries\n";
930 }
931 std::string map_txt;
932 for (auto map : maps) {
933 map_txt += android::base::StringPrintf("%" PRIx64 "-%" PRIx64 "\n", map.start, map.end);
934 }
935 return map_txt;
936 }
937
static std::string GetMapsAsString(BacktraceMap* maps) {
939 if (maps->size() == 0) {
940 return "No map entries\n";
941 }
942 std::string map_txt;
943 for (const backtrace_map_t* map : *maps) {
944 map_txt += android::base::StringPrintf(
945 "%" PRIx64 "-%" PRIx64 " flags: 0x%x offset: 0x%" PRIx64 " load_bias: 0x%" PRIx64,
946 map->start, map->end, map->flags, map->offset, map->load_bias);
947 if (!map->name.empty()) {
948 map_txt += ' ' + map->name;
949 }
950 map_txt += '\n';
951 }
952 return map_txt;
953 }
954
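// Parses /proc/<pid>/maps and verifies that BacktraceMap::Create returns the same ranges in the same order.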
static void VerifyMap(pid_t pid) {
956 char buffer[4096];
957 snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);
958
959 FILE* map_file = fopen(buffer, "r");
960 ASSERT_TRUE(map_file != nullptr);
961 std::vector<map_test_t> test_maps;
962 while (fgets(buffer, sizeof(buffer), map_file)) {
963 map_test_t map;
964 ASSERT_EQ(2, sscanf(buffer, "%" SCNx64 "-%" SCNx64 " ", &map.start, &map.end));
965 test_maps.push_back(map);
966 }
967 fclose(map_file);
968 std::sort(test_maps.begin(), test_maps.end(), map_sort);
969
970 std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));
971
972 // Basic test that verifies that the map is in the expected order.
973 auto test_it = test_maps.begin();
974 for (auto it = map->begin(); it != map->end(); ++it) {
975 ASSERT_TRUE(test_it != test_maps.end()) << "Mismatch in number of maps, expected test maps:\n"
976 << GetTestMapsAsString(test_maps) << "Actual maps:\n"
977 << GetMapsAsString(map.get());
978 ASSERT_EQ(test_it->start, (*it)->start) << "Mismatch in map data, expected test maps:\n"
979 << GetTestMapsAsString(test_maps) << "Actual maps:\n"
980 << GetMapsAsString(map.get());
981 ASSERT_EQ(test_it->end, (*it)->end) << "Mismatch maps in map data, expected test maps:\n"
982 << GetTestMapsAsString(test_maps) << "Actual maps:\n"
983 << GetMapsAsString(map.get());
// Make sure the load bias gets set to a value.
985 ASSERT_NE(static_cast<uint64_t>(-1), (*it)->load_bias) << "Found uninitialized load_bias\n"
986 << GetMapsAsString(map.get());
987 ++test_it;
988 }
989 ASSERT_TRUE(test_it == test_maps.end());
990 }
991
TEST_F(BacktraceTest, verify_map_remote) {
993 pid_t pid;
994 CreateRemoteProcess(&pid);
995
996 // The maps should match exactly since the forked process has been paused.
997 VerifyMap(pid);
998
999 FinishRemoteProcess(pid);
1000 }
1001
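// Fills the buffer with an incrementing, non-zero byte pattern used to verify reads.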
static void InitMemory(uint8_t* memory, size_t bytes) {
1003 for (size_t i = 0; i < bytes; i++) {
1004 memory[i] = i;
1005 if (memory[i] == '\0') {
1006 // Don't use '\0' in our data so we can verify that an overread doesn't
1007 // occur by using a '\0' as the character after the read data.
1008 memory[i] = 23;
1009 }
1010 }
1011 }
1012
static void* ThreadReadTest(void* data) {
1014 thread_t* thread_data = reinterpret_cast<thread_t*>(data);
1015
1016 thread_data->tid = android::base::GetThreadId();
1017
1018 // Create two map pages.
1019 // Mark the second page as not-readable.
1020 size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
1021 uint8_t* memory;
1022 if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
1023 return reinterpret_cast<void*>(-1);
1024 }
1025
1026 if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
1027 return reinterpret_cast<void*>(-1);
1028 }
1029
1030 // Set up a simple pattern in memory.
1031 InitMemory(memory, pagesize);
1032
1033 thread_data->data = memory;
1034
1035 // Tell the caller it's okay to start reading memory.
1036 android_atomic_acquire_store(1, &thread_data->state);
1037
1038 // Loop waiting for the caller to finish reading the memory.
1039 while (thread_data->state) {
1040 }
1041
// Re-enable read-write on the page so that we don't crash if we try
// to access data on this page when freeing the memory.
1044 if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
1045 return reinterpret_cast<void*>(-1);
1046 }
1047 free(memory);
1048
1049 android_atomic_acquire_store(1, &thread_data->state);
1050
1051 return nullptr;
1052 }
1053
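// Exercises Backtrace::Read against the two-page buffer: reads must stop at
// the unreadable second page, and unaligned and short reads must return
// exactly the expected bytes without overreading.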
static void RunReadTest(Backtrace* backtrace, uint64_t read_addr) {
1055 size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
1056
1057 // Create a page of data to use to do quick compares.
1058 uint8_t* expected = new uint8_t[pagesize];
1059 InitMemory(expected, pagesize);
1060
1061 uint8_t* data = new uint8_t[2 * pagesize];
1062 // Verify that we can only read one page worth of data.
1063 size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
1064 ASSERT_EQ(pagesize, bytes_read);
1065 ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);
1066
1067 // Verify unaligned reads.
1068 for (size_t i = 1; i < sizeof(word_t); i++) {
1069 bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
1070 ASSERT_EQ(2 * sizeof(word_t), bytes_read);
1071 ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
1072 << "Offset at " << i << " failed";
1073 }
1074
1075 // Verify small unaligned reads.
1076 for (size_t i = 1; i < sizeof(word_t); i++) {
1077 for (size_t j = 1; j < sizeof(word_t); j++) {
1078 // Set one byte past what we expect to read, to guarantee we don't overread.
1079 data[j] = '\0';
1080 bytes_read = backtrace->Read(read_addr + i, data, j);
1081 ASSERT_EQ(j, bytes_read);
1082 ASSERT_TRUE(memcmp(data, &expected[i], j) == 0)
1083 << "Offset at " << i << " length " << j << " miscompared";
1084 ASSERT_EQ('\0', data[j])
1085 << "Offset at " << i << " length " << j << " wrote too much data";
1086 }
1087 }
1088 delete[] data;
1089 delete[] expected;
1090 }
1091
TEST_F(BacktraceTest, thread_read) {
1093 pthread_attr_t attr;
1094 pthread_attr_init(&attr);
1095 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
1096 pthread_t thread;
1097 thread_t thread_data = { 0, 0, 0, nullptr };
1098 ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);
1099
1100 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
1101
1102 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
1103 ASSERT_TRUE(backtrace.get() != nullptr);
1104
1105 RunReadTest(backtrace.get(), reinterpret_cast<uint64_t>(thread_data.data));
1106
1107 android_atomic_acquire_store(0, &thread_data.state);
1108
1109 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
1110 }
1111
// The code requires that these variables be the same size.
1113 volatile uint64_t g_ready = 0;
1114 volatile uint64_t g_addr = 0;
1115 static_assert(sizeof(g_ready) == sizeof(g_addr), "g_ready/g_addr must be same size");
1116
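// Child side of the process_read test: sets up the two-page buffer and publishes its address through g_addr/g_ready.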
static void ForkedReadTest() {
1118 // Create two map pages.
1119 size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
1120 uint8_t* memory;
1121 if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
1122 perror("Failed to allocate memory\n");
1123 exit(1);
1124 }
1125
1126 // Mark the second page as not-readable.
1127 if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
1128 perror("Failed to mprotect memory\n");
1129 exit(1);
1130 }
1131
1132 // Set up a simple pattern in memory.
1133 InitMemory(memory, pagesize);
1134
1135 g_addr = reinterpret_cast<uint64_t>(memory);
1136 g_ready = 1;
1137
1138 while (1) {
1139 usleep(US_PER_MSEC);
1140 }
1141 }
1142
TEST_F(BacktraceTest, process_read) {
1144 g_ready = 0;
1145 pid_t pid;
1146 if ((pid = fork()) == 0) {
1147 ForkedReadTest();
1148 exit(0);
1149 }
1150 ASSERT_NE(-1, pid);
1151
1152 bool test_executed = false;
1153 uint64_t start = NanoTime();
1154 while (1) {
1155 if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
1156 WaitForStop(pid);
1157
1158 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
1159 ASSERT_TRUE(backtrace.get() != nullptr);
1160
1161 uint64_t read_addr;
1162 size_t bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(&g_ready),
1163 reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_ready));
1164 ASSERT_EQ(sizeof(g_ready), bytes_read);
1165 if (read_addr) {
1166 // The forked process is ready to be read.
1167 bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(&g_addr),
1168 reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_addr));
1169 ASSERT_EQ(sizeof(g_addr), bytes_read);
1170
1171 RunReadTest(backtrace.get(), read_addr);
1172
1173 test_executed = true;
1174 break;
1175 }
1176 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
1177 }
1178 if ((NanoTime() - start) > 5 * NS_PER_SEC) {
1179 break;
1180 }
1181 usleep(US_PER_MSEC);
1182 }
1183 kill(pid, SIGKILL);
1184 ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
1185
1186 ASSERT_TRUE(test_executed);
1187 }
1188
static void VerifyFunctionsFound(const std::vector<std::string>& found_functions) {
// We expect to find these functions in libbacktrace_test. If we don't
// find them, that's a bug in the unwinder's memory read handling code.
1192 std::list<std::string> expected_functions;
1193 expected_functions.push_back("test_recursive_call");
1194 expected_functions.push_back("test_level_one");
1195 expected_functions.push_back("test_level_two");
1196 expected_functions.push_back("test_level_three");
1197 expected_functions.push_back("test_level_four");
1198 for (const auto& found_function : found_functions) {
1199 for (const auto& expected_function : expected_functions) {
1200 if (found_function == expected_function) {
1201 expected_functions.remove(found_function);
1202 break;
1203 }
1204 }
1205 }
1206 ASSERT_TRUE(expected_functions.empty()) << "Not all functions found in shared library.";
1207 }
1208
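// Copies libbacktrace_test.so into tmp_dir and returns the path of the copy in tmp_so_name.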
static void CopySharedLibrary(const char* tmp_dir, std::string* tmp_so_name) {
1210 std::string test_lib(testing::internal::GetArgvs()[0]);
1211 auto const value = test_lib.find_last_of('/');
1212 if (value == std::string::npos) {
1213 test_lib = "../backtrace_test_libs/";
1214 } else {
1215 test_lib = test_lib.substr(0, value + 1) + "../backtrace_test_libs/";
1216 }
1217 test_lib += "libbacktrace_test.so";
1218
1219 *tmp_so_name = std::string(tmp_dir) + "/libbacktrace_test.so";
1220 std::string cp_cmd = android::base::StringPrintf("cp %s %s", test_lib.c_str(), tmp_dir);
1221
// Copy the shared library to a temporary directory.
1223 ASSERT_EQ(0, system(cp_cmd.c_str()));
1224 }
1225
TEST_F(BacktraceTest, check_unreadable_elf_local) {
1227 TemporaryDir td;
1228 std::string tmp_so_name;
1229 ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));
1230
1231 struct stat buf;
1232 ASSERT_TRUE(stat(tmp_so_name.c_str(), &buf) != -1);
1233 uint64_t map_size = buf.st_size;
1234
1235 int fd = open(tmp_so_name.c_str(), O_RDONLY);
1236 ASSERT_TRUE(fd != -1);
1237
1238 void* map = mmap(nullptr, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
1239 ASSERT_TRUE(map != MAP_FAILED);
1240 close(fd);
1241 ASSERT_TRUE(unlink(tmp_so_name.c_str()) != -1);
1242
1243 std::vector<std::string> found_functions;
1244 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
1245 BACKTRACE_CURRENT_THREAD));
1246 ASSERT_TRUE(backtrace.get() != nullptr);
1247
1248 // Needed before GetFunctionName will work.
1249 backtrace->Unwind(0);
1250
1251 // Loop through the entire map, and get every function we can find.
1252 map_size += reinterpret_cast<uint64_t>(map);
1253 std::string last_func;
1254 for (uint64_t read_addr = reinterpret_cast<uint64_t>(map); read_addr < map_size; read_addr += 4) {
1255 uint64_t offset;
1256 std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
1257 if (!func_name.empty() && last_func != func_name) {
1258 found_functions.push_back(func_name);
1259 }
1260 last_func = func_name;
1261 }
1262
1263 ASSERT_TRUE(munmap(map, map_size - reinterpret_cast<uint64_t>(map)) == 0);
1264
1265 VerifyFunctionsFound(found_functions);
1266 }
1267
TEST_F(BacktraceTest, check_unreadable_elf_remote) {
1269 TemporaryDir td;
1270 std::string tmp_so_name;
1271 ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));
1272
1273 g_ready = 0;
1274
1275 struct stat buf;
1276 ASSERT_TRUE(stat(tmp_so_name.c_str(), &buf) != -1);
1277 uint64_t map_size = buf.st_size;
1278
1279 pid_t pid;
1280 if ((pid = fork()) == 0) {
1281 int fd = open(tmp_so_name.c_str(), O_RDONLY);
1282 if (fd == -1) {
1283 fprintf(stderr, "Failed to open file %s: %s\n", tmp_so_name.c_str(), strerror(errno));
1284 unlink(tmp_so_name.c_str());
1285 exit(0);
1286 }
1287
1288 void* map = mmap(nullptr, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
1289 if (map == MAP_FAILED) {
1290 fprintf(stderr, "Failed to map in memory: %s\n", strerror(errno));
1291 unlink(tmp_so_name.c_str());
1292 exit(0);
1293 }
1294 close(fd);
1295 if (unlink(tmp_so_name.c_str()) == -1) {
1296 fprintf(stderr, "Failed to unlink: %s\n", strerror(errno));
1297 exit(0);
1298 }
1299
1300 g_addr = reinterpret_cast<uint64_t>(map);
1301 g_ready = 1;
1302 while (true) {
1303 usleep(US_PER_MSEC);
1304 }
1305 exit(0);
1306 }
1307 ASSERT_TRUE(pid > 0);
1308
1309 std::vector<std::string> found_functions;
1310 uint64_t start = NanoTime();
1311 while (true) {
1312 ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
1313
1314 // Wait for the process to get to a stopping point.
1315 WaitForStop(pid);
1316
1317 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
1318 ASSERT_TRUE(backtrace.get() != nullptr);
1319
1320 uint64_t read_addr;
1321 ASSERT_EQ(sizeof(g_ready),
1322 backtrace->Read(reinterpret_cast<uint64_t>(&g_ready),
1323 reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_ready)));
1324 if (read_addr) {
1325 ASSERT_EQ(sizeof(g_addr),
1326 backtrace->Read(reinterpret_cast<uint64_t>(&g_addr),
1327 reinterpret_cast<uint8_t*>(&read_addr), sizeof(uint64_t)));
1328
1329 // Needed before GetFunctionName will work.
1330 backtrace->Unwind(0);
1331
1332 // Loop through the entire map, and get every function we can find.
1333 map_size += read_addr;
1334 std::string last_func;
1335 for (; read_addr < map_size; read_addr += 4) {
1336 uint64_t offset;
1337 std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
1338 if (!func_name.empty() && last_func != func_name) {
1339 found_functions.push_back(func_name);
1340 }
1341 last_func = func_name;
1342 }
1343 break;
1344 }
1345 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
1346
1347 if ((NanoTime() - start) > 5 * NS_PER_SEC) {
1348 break;
1349 }
1350 usleep(US_PER_MSEC);
1351 }
1352
1353 kill(pid, SIGKILL);
1354 ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
1355
1356 VerifyFunctionsFound(found_functions);
1357 }
1358
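// Finds the first frame whose pc lies in the same map as test_func and at or above its address; the frame number is returned through frame_num.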
static bool FindFuncFrameInBacktrace(Backtrace* backtrace, uint64_t test_func, size_t* frame_num) {
1360 backtrace_map_t map;
1361 backtrace->FillInMap(test_func, &map);
1362 if (!BacktraceMap::IsValid(map)) {
1363 return false;
1364 }
1365
1366 // Loop through the frames, and find the one that is in the map.
1367 *frame_num = 0;
1368 for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
1369 if (BacktraceMap::IsValid(it->map) && map.start == it->map.start &&
1370 it->pc >= test_func) {
1371 *frame_num = it->num;
1372 return true;
1373 }
1374 }
1375 return false;
1376 }
1377
static void VerifyUnreadableElfFrame(Backtrace* backtrace, uint64_t test_func, size_t frame_num) {
1379 ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
1380 << DumpFrames(backtrace);
1381
1382 ASSERT_TRUE(frame_num != 0) << DumpFrames(backtrace);
1383 // Make sure that there is at least one more frame above the test func call.
1384 ASSERT_LT(frame_num, backtrace->NumFrames()) << DumpFrames(backtrace);
1385
1386 uint64_t diff = backtrace->GetFrame(frame_num)->pc - test_func;
1387 ASSERT_LT(diff, 200U) << DumpFrames(backtrace);
1388 }
1389
static void VerifyUnreadableElfBacktrace(void* func) {
1391 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
1392 BACKTRACE_CURRENT_THREAD));
1393 ASSERT_TRUE(backtrace.get() != nullptr);
1394 ASSERT_TRUE(backtrace->Unwind(0));
1395 VERIFY_NO_ERROR(backtrace->GetError().error_code);
1396
1397 size_t frame_num;
1398 uint64_t test_func = reinterpret_cast<uint64_t>(func);
1399 ASSERT_TRUE(FindFuncFrameInBacktrace(backtrace.get(), test_func, &frame_num))
1400 << DumpFrames(backtrace.get());
1401
1402 VerifyUnreadableElfFrame(backtrace.get(), test_func, frame_num);
1403 }
1404
1405 typedef int (*test_func_t)(int, int, int, int, void (*)(void*), void*);
1406
TEST_F(BacktraceTest, unwind_through_unreadable_elf_local) {
1408 TemporaryDir td;
1409 std::string tmp_so_name;
1410 ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));
1411
1412 void* lib_handle = dlopen(tmp_so_name.c_str(), RTLD_NOW);
1413 ASSERT_TRUE(lib_handle != nullptr);
1414 ASSERT_TRUE(unlink(tmp_so_name.c_str()) != -1);
1415
1416 test_func_t test_func;
1417 test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
1418 ASSERT_TRUE(test_func != nullptr);
1419
1420 ASSERT_NE(test_func(1, 2, 3, 4, VerifyUnreadableElfBacktrace, reinterpret_cast<void*>(test_func)),
1421 0);
1422 }
1423
TEST_F(BacktraceTest, unwind_through_unreadable_elf_remote) {
1425 TemporaryDir td;
1426 std::string tmp_so_name;
1427 ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));
1428
1429 void* lib_handle = dlopen(tmp_so_name.c_str(), RTLD_NOW);
1430 ASSERT_TRUE(lib_handle != nullptr);
1431 ASSERT_TRUE(unlink(tmp_so_name.c_str()) != -1);
1432
1433 test_func_t test_func;
1434 test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
1435 ASSERT_TRUE(test_func != nullptr);
1436
1437 pid_t pid;
1438 if ((pid = fork()) == 0) {
1439 test_func(1, 2, 3, 4, 0, 0);
1440 exit(0);
1441 }
1442 ASSERT_TRUE(pid > 0);
1443
1444 uint64_t start = NanoTime();
1445 bool done = false;
1446 while (!done) {
1447 ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
1448
1449 // Wait for the process to get to a stopping point.
1450 WaitForStop(pid);
1451
1452 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
1453 ASSERT_TRUE(backtrace.get() != nullptr);
1454 ASSERT_TRUE(backtrace->Unwind(0));
1455 VERIFY_NO_ERROR(backtrace->GetError().error_code);
1456
1457 size_t frame_num;
1458 if (FindFuncFrameInBacktrace(backtrace.get(), reinterpret_cast<uint64_t>(test_func),
1459 &frame_num) &&
1460 frame_num != 0) {
1461 VerifyUnreadableElfFrame(backtrace.get(), reinterpret_cast<uint64_t>(test_func), frame_num);
1462 done = true;
1463 }
1464
1465 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
1466
1467 if ((NanoTime() - start) > 5 * NS_PER_SEC) {
1468 break;
1469 }
1470 usleep(US_PER_MSEC);
1471 }
1472
1473 kill(pid, SIGKILL);
1474 ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
1475
1476 ASSERT_TRUE(done) << "Test function never found in unwind.";
1477 }
1478
TEST_F(BacktraceTest, unwind_thread_doesnt_exist) {
1480 std::unique_ptr<Backtrace> backtrace(
1481 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 99999999));
1482 ASSERT_TRUE(backtrace.get() != nullptr);
1483 ASSERT_FALSE(backtrace->Unwind(0));
1484 ASSERT_EQ(BACKTRACE_UNWIND_ERROR_THREAD_DOESNT_EXIST, backtrace->GetError().error_code);
1485 }
1486
TEST_F(BacktraceTest, local_get_function_name_before_unwind) {
1488 std::unique_ptr<Backtrace> backtrace(
1489 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
1490 ASSERT_TRUE(backtrace.get() != nullptr);
1491
1492 // Verify that trying to get a function name before doing an unwind works.
1493 uint64_t cur_func_offset = reinterpret_cast<uint64_t>(test_level_one_) + 1;
1494 uint64_t offset;
1495 ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset));
1496 }
1497
TEST_F(BacktraceTest, remote_get_function_name_before_unwind) {
1499 pid_t pid;
1500 CreateRemoteProcess(&pid);
1501
1502 // Now create an unwind object.
1503 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
1504
1505 // Verify that trying to get a function name before doing an unwind works.
1506 uint64_t cur_func_offset = reinterpret_cast<uint64_t>(test_level_one_) + 1;
1507 uint64_t offset;
1508 ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset));
1509
1510 FinishRemoteProcess(pid);
1511 }
1512
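// Architecture-specific helpers that set the stack pointer, program counter,
// and link register of a ucontext used to drive a synthetic unwind.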
static void SetUcontextSp(uint64_t sp, ucontext_t* ucontext) {
1514 #if defined(__arm__)
1515 ucontext->uc_mcontext.arm_sp = sp;
1516 #elif defined(__aarch64__)
1517 ucontext->uc_mcontext.sp = sp;
1518 #elif defined(__i386__)
1519 ucontext->uc_mcontext.gregs[REG_ESP] = sp;
1520 #elif defined(__x86_64__)
1521 ucontext->uc_mcontext.gregs[REG_RSP] = sp;
1522 #else
1523 UNUSED(sp);
1524 UNUSED(ucontext);
1525 ASSERT_TRUE(false) << "Unsupported architecture";
1526 #endif
1527 }
1528
static void SetUcontextPc(uint64_t pc, ucontext_t* ucontext) {
1530 #if defined(__arm__)
1531 ucontext->uc_mcontext.arm_pc = pc;
1532 #elif defined(__aarch64__)
1533 ucontext->uc_mcontext.pc = pc;
1534 #elif defined(__i386__)
1535 ucontext->uc_mcontext.gregs[REG_EIP] = pc;
1536 #elif defined(__x86_64__)
1537 ucontext->uc_mcontext.gregs[REG_RIP] = pc;
1538 #else
1539 UNUSED(pc);
1540 UNUSED(ucontext);
1541 ASSERT_TRUE(false) << "Unsupported architecture";
1542 #endif
1543 }
1544
static void SetUcontextLr(uint64_t lr, ucontext_t* ucontext) {
1546 #if defined(__arm__)
1547 ucontext->uc_mcontext.arm_lr = lr;
1548 #elif defined(__aarch64__)
1549 ucontext->uc_mcontext.regs[30] = lr;
1550 #elif defined(__i386__)
1551 // The lr is on the stack.
1552 ASSERT_TRUE(lr != 0);
1553 ASSERT_TRUE(ucontext != nullptr);
1554 #elif defined(__x86_64__)
1555 // The lr is on the stack.
1556 ASSERT_TRUE(lr != 0);
1557 ASSERT_TRUE(ucontext != nullptr);
1558 #else
1559 UNUSED(lr);
1560 UNUSED(ucontext);
1561 ASSERT_TRUE(false) << "Unsupported architecture";
1562 #endif
1563 }
1564
1565 static constexpr size_t DEVICE_MAP_SIZE = 1024;
1566
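// Map a page of /dev/zero so the process gets a map entry that is backed by a
// device. The tests below expect such maps to be flagged with PROT_DEVICE_MAP
// and treated as unreadable by the unwinder (see UnwindFromDevice()).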
static void SetupDeviceMap(void** device_map) {
  // Make sure that anything in a device map results in a failure to read.
  android::base::unique_fd device_fd(open("/dev/zero", O_RDONLY | O_CLOEXEC));

  *device_map = mmap(nullptr, DEVICE_MAP_SIZE, PROT_READ, MAP_PRIVATE, device_fd, 0);
  ASSERT_TRUE(*device_map != MAP_FAILED);

  // Make sure the map is readable.
  ASSERT_EQ(0, reinterpret_cast<int*>(*device_map)[0]);
}

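// Exercise the device-map restrictions: function name lookups inside a device
// map must come back empty, and an unwind whose pc or sp lands in a device map
// should stop after producing at most the initial frame.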
static void UnwindFromDevice(Backtrace* backtrace, void* device_map) {
  uint64_t device_map_uint = reinterpret_cast<uint64_t>(device_map);

  backtrace_map_t map;
  backtrace->FillInMap(device_map_uint, &map);
  // Verify the device map flag is set.
  ASSERT_EQ(PROT_DEVICE_MAP, map.flags & PROT_DEVICE_MAP);

  // Quick sanity checks.
  uint64_t offset;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset, &map));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(0, &offset));

  uint64_t cur_func_offset = reinterpret_cast<uint64_t>(BacktraceTest::test_level_one_) + 1;
  // Verify that the function name resolves for a normal map, and that setting
  // the device map flag causes the name to come back empty.
  backtrace->FillInMap(cur_func_offset, &map);
  ASSERT_TRUE((map.flags & PROT_DEVICE_MAP) == 0);
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));
  map.flags |= PROT_DEVICE_MAP;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));

  ucontext_t ucontext;

  // Create a context that has the pc in the device map, but the sp
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(reinterpret_cast<uint64_t>(&ucontext), &ucontext);
  SetUcontextPc(device_map_uint, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The unwind should produce only a single frame.
  ASSERT_EQ(1U, backtrace->NumFrames());
  const backtrace_frame_data_t* frame = backtrace->GetFrame(0);
  ASSERT_EQ(device_map_uint, frame->pc);
  ASSERT_EQ(reinterpret_cast<uint64_t>(&ucontext), frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());

  // Create a context that has the sp in the device map, but the pc
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(device_map_uint, &ucontext);
  SetUcontextPc(cur_func_offset, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The unwind should produce only a single frame.
  ASSERT_EQ(1U, backtrace->NumFrames());
  frame = backtrace->GetFrame(0);
  ASSERT_EQ(cur_func_offset, frame->pc);
  ASSERT_EQ(device_map_uint, frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());
}

TEST_F(BacktraceTest, unwind_disallow_device_map_local) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace);

  UnwindFromDevice(backtrace.get(), device_map);

  munmap(device_map, DEVICE_MAP_SIZE);
}

TEST_F(BacktraceTest, unwind_disallow_device_map_remote) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Fork a process to do a remote backtrace.
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

  UnwindFromDevice(backtrace.get(), device_map);

  FinishRemoteProcess(pid);

  munmap(device_map, DEVICE_MAP_SIZE);
}

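// RAII helper: installs a plain signal handler or an SA_SIGINFO action for the
// given signal and restores the previous disposition in the destructor. Used
// below in UnwindThroughSignal(), e.g.:
//
//   ScopedSignalHandler ssh(SIGUSR1, BacktraceTest::test_signal_handler_);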
class ScopedSignalHandler {
 public:
  ScopedSignalHandler(int signal_number, void (*handler)(int)) : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_handler = handler;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ScopedSignalHandler(int signal_number, void (*action)(int, siginfo_t*, void*))
      : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_flags = SA_SIGINFO;
    action_.sa_sigaction = action;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ~ScopedSignalHandler() { sigaction(signal_number_, &old_action_, nullptr); }

 private:
  struct sigaction action_;
  struct sigaction old_action_;
  const int signal_number_;
};

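// Runs in the forked child at the bottom of the test_level_* call chain:
// publishes a value the parent can observe via Backtrace::Read(), then spins
// forever so the parent can interrupt it with a signal.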
static void SetValueAndLoop(void* data) {
  volatile int* value = reinterpret_cast<volatile int*>(data);

  *value = 1;
  for (volatile int i = 0;; i++)
    ;
}

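// Fork a child that calls down through test_level_one..test_level_four into
// SetValueAndLoop, wait (using ptrace reads) until it reaches the loop, then
// send SIGUSR1 and verify that a remote unwind walks from the signal handler
// (or SA_SIGINFO action) back through the original test_level_* call chain.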
static void UnwindThroughSignal(bool use_action, create_func_t create_func,
                                map_create_func_t map_create_func) {
  volatile int value = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    if (use_action) {
      ScopedSignalHandler ssh(SIGUSR1, BacktraceTest::test_signal_action_);

      BacktraceTest::test_level_one_(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    } else {
      ScopedSignalHandler ssh(SIGUSR1, BacktraceTest::test_signal_handler_);

      BacktraceTest::test_level_one_(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    }
  }
  ASSERT_NE(-1, pid);

  int read_value = 0;
  uint64_t start = NanoTime();
  while (read_value == 0) {
    usleep(1000);

    // Loop until the remote process reaches the final function.
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    std::unique_ptr<BacktraceMap> map(map_create_func(pid, false));
    std::unique_ptr<Backtrace> backtrace(create_func(pid, pid, map.get()));

    size_t bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(const_cast<int*>(&value)),
                                        reinterpret_cast<uint8_t*>(&read_value), sizeof(read_value));
    ASSERT_EQ(sizeof(read_value), bytes_read);

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not execute far enough in 5 seconds.";
  }

  // Now send a signal to the remote process.
  kill(pid, SIGUSR1);

  // Wait for the process to get to the signal handler loop.
  Backtrace::const_iterator frame_iter;
  start = NanoTime();
  std::unique_ptr<BacktraceMap> map;
  std::unique_ptr<Backtrace> backtrace;
  while (true) {
    usleep(1000);

    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    map.reset(map_create_func(pid, false));
    ASSERT_TRUE(map.get() != nullptr);
    backtrace.reset(create_func(pid, pid, map.get()));
    ASSERT_TRUE(backtrace->Unwind(0));
    bool found = false;
    for (frame_iter = backtrace->begin(); frame_iter != backtrace->end(); ++frame_iter) {
      if (frame_iter->func_name == "test_loop_forever") {
        ++frame_iter;
        found = true;
        break;
      }
    }
    if (found) {
      break;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not get into the signal handler in 5 seconds." << std::endl
        << DumpFrames(backtrace.get());
  }

  std::vector<std::string> names;
  // Loop through the frames and save the function names.
  size_t frame = 0;
  for (; frame_iter != backtrace->end(); ++frame_iter) {
    if (frame_iter->func_name == "test_level_four") {
      frame = names.size() + 1;
    }
    names.push_back(frame_iter->func_name);
  }
  ASSERT_NE(0U, frame) << "Unable to find test_level_four in backtrace" << std::endl
                       << DumpFrames(backtrace.get());

  // The expected order of the frames:
  //   test_loop_forever
  //   test_signal_handler|test_signal_action
  //   <OPTIONAL_FRAME> May or may not exist.
  //   SetValueAndLoop (but the function name might be empty)
  //   test_level_four
  //   test_level_three
  //   test_level_two
  //   test_level_one
  ASSERT_LE(frame + 2, names.size()) << DumpFrames(backtrace.get());
  ASSERT_LE(2U, frame) << DumpFrames(backtrace.get());
  if (use_action) {
    ASSERT_EQ("test_signal_action", names[0]) << DumpFrames(backtrace.get());
  } else {
    ASSERT_EQ("test_signal_handler", names[0]) << DumpFrames(backtrace.get());
  }
  ASSERT_EQ("test_level_three", names[frame]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_two", names[frame + 1]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_one", names[frame + 2]) << DumpFrames(backtrace.get());

  FinishRemoteProcess(pid);
}

TEST_F(BacktraceTest, unwind_remote_through_signal_using_handler) {
  UnwindThroughSignal(false, Backtrace::Create, BacktraceMap::Create);
}

TEST_F(BacktraceTest, unwind_remote_through_signal_using_action) {
  UnwindThroughSignal(true, Backtrace::Create, BacktraceMap::Create);
}

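// Verify that when the first frame is skipped (Unwind(1)), the remaining
// frames are renumbered starting from zero.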
static void TestFrameSkipNumbering(create_func_t create_func, map_create_func_t map_create_func) {
  std::unique_ptr<BacktraceMap> map(map_create_func(getpid(), false));
  std::unique_ptr<Backtrace> backtrace(
      create_func(getpid(), android::base::GetThreadId(), map.get()));
  backtrace->Unwind(1);
  ASSERT_NE(0U, backtrace->NumFrames());
  ASSERT_EQ(0U, backtrace->GetFrame(0)->num);
}

TEST_F(BacktraceTest, unwind_frame_skip_numbering) {
  TestFrameSkipNumbering(Backtrace::Create, BacktraceMap::Create);
}

#define MAX_LEAK_BYTES (32*1024UL)

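// Rough leak check: repeatedly create, unwind, and destroy a Backtrace object,
// sampling mallinfo().uordblks on each iteration, and assert that heap usage
// never grows more than MAX_LEAK_BYTES beyond the first sample. This is a
// heuristic, not a precise accounting of allocations.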
static void CheckForLeak(pid_t pid, pid_t tid) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Loop enough that even a small leak should be detectable.
  size_t first_allocated_bytes = 0;
  size_t last_allocated_bytes = 0;
  for (size_t i = 0; i < 4096; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid, map.get());
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    VERIFY_NO_ERROR(backtrace->GetError().error_code);
    delete backtrace;

    size_t allocated_bytes = mallinfo().uordblks;
    if (first_allocated_bytes == 0) {
      first_allocated_bytes = allocated_bytes;
    } else if (last_allocated_bytes > first_allocated_bytes) {
      // Check that memory usage did not increase too much compared to the first iteration.
      ASSERT_LE(last_allocated_bytes - first_allocated_bytes, MAX_LEAK_BYTES);
    }
    last_allocated_bytes = allocated_bytes;
  }
}

TEST_F(BacktraceTest, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}

TEST_F(BacktraceTest, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}

TEST_F(BacktraceTest, check_for_leak_remote) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  FinishRemoteProcess(pid);
}