// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/stack_trace.h"

#include <string.h>

#include <algorithm>
#include <sstream>

#include "base/check_op.h"
#include "build/build_config.h"
#include "build/config/compiler/compiler_buildflags.h"

#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
#include "third_party/abseil-cpp/absl/types/optional.h"

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>

#include "base/process/process_handle.h"
#include "base/threading/platform_thread.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include <pthread.h>
#endif

#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && defined(__GLIBC__)
extern "C" void* __libc_stack_end;
#endif

#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)

namespace base {
namespace debug {

namespace {

#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)

#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
// GCC and LLVM generate slightly different frames on ARM, see
// https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
// x86-compatible frame, while GCC needs adjustment.
constexpr size_t kStackFrameAdjustment = sizeof(uintptr_t);
#else
constexpr size_t kStackFrameAdjustment = 0;
#endif

// On Arm-v8.3+ systems with pointer authentication codes (PAC), signature bits
// are set in the top bits of the pointer, which confuses test assertions.
// Because the signature size can vary based on the system configuration, use
// the xpaclri instruction to remove the signature.
static uintptr_t StripPointerAuthenticationBits(uintptr_t ptr) {
#if defined(ARCH_CPU_ARM64)
  // A single Chromium binary currently spans all Arm systems (including those
  // with and without pointer authentication). xpaclri is used here because
  // it's in the HINT space and treated as a no-op on older Arm cores (unlike
  // the more generic xpaci which has a new encoding). The downside is that ptr
  // has to be moved to x30 to use this instruction.
  // TODO(richard.townsend@arm.com): replace with an intrinsic once that is
  // available.
  register uintptr_t x30 __asm("x30") = ptr;
  asm("xpaclri" : "+r"(x30));
  return x30;
#else
  // No-op on other platforms.
  return ptr;
#endif
}

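// The helpers below assume the conventional frame-pointer layout produced by
// frame-pointer-preserving builds (an assumption of this unwinder, adjusted by
// kStackFrameAdjustment where needed): fp[0] holds the caller's frame pointer
// and fp[1] holds the return address.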
uintptr_t GetNextStackFrame(uintptr_t fp) {
  const uintptr_t* fp_addr = reinterpret_cast<const uintptr_t*>(fp);
  MSAN_UNPOISON(fp_addr, sizeof(uintptr_t));
  return fp_addr[0] - kStackFrameAdjustment;
}

uintptr_t GetStackFramePC(uintptr_t fp) {
  const uintptr_t* fp_addr = reinterpret_cast<const uintptr_t*>(fp);
  MSAN_UNPOISON(&fp_addr[1], sizeof(uintptr_t));
  return StripPointerAuthenticationBits(fp_addr[1]);
}

bool IsStackFrameValid(uintptr_t fp, uintptr_t prev_fp, uintptr_t stack_end) {
  // With the stack growing downwards, an older stack frame must be
  // at a greater address than the current one.
  if (fp <= prev_fp) return false;

  // Assume huge stack frames are bogus.
  if (fp - prev_fp > 100000) return false;

  // Check alignment.
  if (fp & (sizeof(uintptr_t) - 1)) return false;

  if (stack_end) {
    // Both fp[0] and fp[1] must be within the stack.
    if (fp > stack_end - 2 * sizeof(uintptr_t)) return false;

    // Additional check to filter out false positives.
    if (GetStackFramePC(fp) < 32768) return false;
  }

  return true;
}

// ScanStackForNextFrame() scans the stack for a valid frame to allow unwinding
// past system libraries. Only supported on Linux where system libraries are
// usually in the middle of the trace:
//
//   TraceStackFramePointers
//   <more frames from Chrome>
//   base::WorkSourceDispatch   <-- unwinding stops (next frame is invalid),
//   g_main_context_dispatch        ScanStackForNextFrame() is called
//   <more frames from glib>
//   g_main_context_iteration
//   base::MessagePumpGlib::Run <-- ScanStackForNextFrame() finds valid frame,
//   base::RunLoop::Run             unwinding resumes
//   <more frames from Chrome>
//   __libc_start_main
//
// ScanStackForNextFrame() returns 0 if it couldn't find a valid frame
// (or if stack scanning is not supported on the current platform).
uintptr_t ScanStackForNextFrame(uintptr_t fp, uintptr_t stack_end) {
  // Enough to resume almost all prematurely terminated traces.
  constexpr size_t kMaxStackScanArea = 8192;

  if (!stack_end) {
    // Too dangerous to scan without knowing where the stack ends.
    return 0;
  }

  fp += sizeof(uintptr_t);  // current frame is known to be invalid
  uintptr_t last_fp_to_scan =
      std::min(fp + kMaxStackScanArea, stack_end) - sizeof(uintptr_t);
  for (; fp <= last_fp_to_scan; fp += sizeof(uintptr_t)) {
    uintptr_t next_fp = GetNextStackFrame(fp);
    if (IsStackFrameValid(next_fp, fp, stack_end)) {
      // Check two frames deep. Since a stack frame is just a pointer to
      // a higher address on the stack, it's relatively easy to find
      // something that looks like one. However, two linked frames are
      // far less likely to be bogus.
      uintptr_t next2_fp = GetNextStackFrame(next_fp);
      if (IsStackFrameValid(next2_fp, next_fp, stack_end)) {
        return fp;
      }
    }
  }

  return 0;
}

// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
// Both frame pointers must come from __builtin_frame_address().
// Returns previous stack frame |fp| was linked to.
void* LinkStackFrames(void* fpp, void* parent_fp) {
  uintptr_t fp = reinterpret_cast<uintptr_t>(fpp) - kStackFrameAdjustment;
  void* prev_parent_fp = reinterpret_cast<void**>(fp)[0];
  reinterpret_cast<void**>(fp)[0] = parent_fp;
  return prev_parent_fp;
}

#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)

}  // namespace

#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
uintptr_t GetStackEnd() {
#if BUILDFLAG(IS_ANDROID)
  // Bionic reads proc/maps on every call to pthread_getattr_np() when called
  // from the main thread. So we need to cache the end of the stack in that
  // case to get acceptable performance.
  // For all other threads pthread_getattr_np() is fast enough as it just reads
  // values from its pthread_t argument.
  static uintptr_t main_stack_end = 0;

  bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
  if (is_main_thread && main_stack_end) {
    return main_stack_end;
  }

  uintptr_t stack_begin = 0;
  size_t stack_size = 0;
  pthread_attr_t attributes;
  int error = pthread_getattr_np(pthread_self(), &attributes);
  if (!error) {
    error = pthread_attr_getstack(
        &attributes, reinterpret_cast<void**>(&stack_begin), &stack_size);
    pthread_attr_destroy(&attributes);
  }
  DCHECK(!error);

  uintptr_t stack_end = stack_begin + stack_size;
  if (is_main_thread) {
    main_stack_end = stack_end;
  }
  return stack_end;  // 0 in case of error
#elif BUILDFLAG(IS_APPLE)
  // No easy way to get end of the stack for non-main threads,
  // see crbug.com/617730.
  return reinterpret_cast<uintptr_t>(pthread_get_stackaddr_np(pthread_self()));
#else

#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && defined(__GLIBC__)
  if (GetCurrentProcId() == PlatformThread::CurrentId()) {
    // For the main thread we have a shortcut.
    return reinterpret_cast<uintptr_t>(__libc_stack_end);
  }
#endif

  // Don't know how to get end of the stack.
  return 0;
#endif
}
#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
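
// Illustrative use of GetStackEnd() (a sketch, not additional API): bound a
// manual frame-pointer walk so fp[0]/fp[1] are only read below the stack end.
//
//   uintptr_t stack_end = GetStackEnd();  // May be 0 when unknown.
//   uintptr_t fp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
//   while (IsStackFrameValid(GetNextStackFrame(fp), fp, stack_end)) {
//     fp = GetNextStackFrame(fp);  // Walk towards older frames.
//   }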

StackTrace::StackTrace() : StackTrace(std::size(trace_)) {}

StackTrace::StackTrace(size_t count) {
  count_ = CollectStackTrace(trace_, std::min(count, std::size(trace_)));
}

StackTrace::StackTrace(const void* const* trace, size_t count) {
  count = std::min(count, std::size(trace_));
  if (count)
    memcpy(trace_, trace, count * sizeof(trace_[0]));
  count_ = count;
}
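
// Illustrative usage (a sketch of the common pattern; the output choices are
// arbitrary):
//
//   base::debug::StackTrace trace;        // Capture from the current frame.
//   trace.Print();                        // Best-effort symbolized output.
//   std::string str = trace.ToString();   // Or keep it as a string.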

// static
bool StackTrace::WillSymbolizeToStreamForTesting() {
#if BUILDFLAG(SYMBOL_LEVEL) == 0
  // Symbols are not expected to be reliable when gn args specifies
  // symbol_level=0.
  return false;
#elif defined(__UCLIBC__) || defined(_AIX)
  // StackTrace::OutputToStream() is not implemented under uclibc, nor AIX.
  // See https://crbug.com/706728
  return false;
#elif defined(OFFICIAL_BUILD) && \
    ((BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)) || BUILDFLAG(IS_FUCHSIA))
  // On some platforms stack traces require an extra data table that bloats our
  // binaries, so they're turned off for official builds.
  return false;
#elif defined(OFFICIAL_BUILD) && BUILDFLAG(IS_APPLE)
  // Official Mac OS X builds contain enough information to unwind the stack,
  // but not enough to symbolize the output.
  return false;
#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_ANDROID)
  // Under Fuchsia and Android, StackTrace emits executable build-Ids and
  // address offsets which are symbolized on the test host system, rather than
  // being symbolized in-process.
  return false;
#elif defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
    defined(MEMORY_SANITIZER)
  // Sanitizer configurations (ASan, TSan, MSan) emit unsymbolized stacks.
  return false;
#else
  return true;
#endif
}

const void* const* StackTrace::Addresses(size_t* count) const {
  *count = count_;
  if (count_)
    return trace_;
  return nullptr;
}
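
// Illustrative use of Addresses() (a sketch; variable names are arbitrary):
//
//   size_t count = 0;
//   const void* const* frames = trace.Addresses(&count);
//   for (size_t i = 0; i < count; ++i) {
//     // frames[i] is the i-th captured return address, innermost first.
//   }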

void StackTrace::Print() const {
  PrintWithPrefix(nullptr);
}

void StackTrace::OutputToStream(std::ostream* os) const {
  OutputToStreamWithPrefix(os, nullptr);
}

std::string StackTrace::ToString() const {
  return ToStringWithPrefix(nullptr);
}

std::string StackTrace::ToStringWithPrefix(const char* prefix_string) const {
  std::stringstream stream;
#if !defined(__UCLIBC__) && !defined(_AIX)
  OutputToStreamWithPrefix(&stream, prefix_string);
#endif
  return stream.str();
}

std::ostream& operator<<(std::ostream& os, const StackTrace& s) {
#if !defined(__UCLIBC__) && !defined(_AIX)
  s.OutputToStream(&os);
#else
  os << "StackTrace::OutputToStream not implemented.";
#endif
  return os;
}
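
// Illustrative streaming usage (a sketch; logging is the usual client, but any
// std::ostream works):
//
//   LOG(ERROR) << "Unexpected state:\n" << base::debug::StackTrace();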

#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)

struct AddressRange {
  uintptr_t start;
  uintptr_t end;
};

bool IsWithinRange(uintptr_t address, const AddressRange& range) {
  return address >= range.start && address <= range.end;
}

// We force this function to be inlined into its callers (e.g.
// TraceStackFramePointers()) in all build modes so we don't have to worry
// about conditionally skipping a frame based on potential inlining or tail
// calls.
__attribute__((always_inline)) size_t TraceStackFramePointersInternal(
    uintptr_t fp,
    uintptr_t stack_end,
    size_t max_depth,
    size_t skip_initial,
    bool enable_scanning,
    const void** out_trace) {
  size_t depth = 0;
  while (depth < max_depth) {
    uintptr_t pc = GetStackFramePC(fp);
    if (skip_initial != 0) {
      skip_initial--;
    } else {
      out_trace[depth++] = reinterpret_cast<const void*>(pc);
    }

    uintptr_t next_fp = GetNextStackFrame(fp);
    if (IsStackFrameValid(next_fp, fp, stack_end)) {
      fp = next_fp;
      continue;
    }

    if (!enable_scanning)
      break;

    next_fp = ScanStackForNextFrame(fp, stack_end);
    if (next_fp) {
      fp = next_fp;
    } else {
      break;
    }
  }

  return depth;
}

NOINLINE size_t TraceStackFramePointers(const void** out_trace,
                                        size_t max_depth,
                                        size_t skip_initial,
                                        bool enable_scanning) {
  return TraceStackFramePointersInternal(
      reinterpret_cast<uintptr_t>(__builtin_frame_address(0)) -
          kStackFrameAdjustment,
      GetStackEnd(), max_depth, skip_initial, enable_scanning, out_trace);
}
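
// Illustrative call (a sketch; the buffer size and flag values are arbitrary):
//
//   const void* frames[64];
//   size_t depth = TraceStackFramePointers(frames, std::size(frames),
//                                          /*skip_initial=*/0,
//                                          /*enable_scanning=*/false);
//   // frames[0..depth) now hold raw PCs, innermost frame first.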

NOINLINE size_t TraceStackFramePointersFromBuffer(uintptr_t fp,
                                                  uintptr_t stack_end,
                                                  const void** out_trace,
                                                  size_t max_depth,
                                                  size_t skip_initial,
                                                  bool enable_scanning) {
  return TraceStackFramePointersInternal(fp, stack_end, max_depth, skip_initial,
                                         enable_scanning, out_trace);
}

ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
    : fp_(fp),
      parent_fp_(parent_fp),
      original_parent_fp_(LinkStackFrames(fp, parent_fp)) {}

ScopedStackFrameLinker::~ScopedStackFrameLinker() {
  void* previous_parent_fp = LinkStackFrames(fp_, original_parent_fp_);
  CHECK_EQ(parent_fp_, previous_parent_fp)
      << "Stack frame's parent pointer has changed!";
}
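
// Illustrative use of ScopedStackFrameLinker (a sketch of the intended
// pattern; RunWithLinkedParent(), DoWork() and |parent_fp| are hypothetical,
// and |parent_fp| must reference a valid frame record while the linker is
// alive):
//
//   void RunWithLinkedParent(void* parent_fp) {
//     ScopedStackFrameLinker linker(__builtin_frame_address(0), parent_fp);
//     // Frame-pointer unwinds through this function now continue into
//     // |parent_fp| instead of the real caller.
//     DoWork();
//   }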

#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)

}  // namespace debug
}  // namespace base