#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_

// Generate stack tracer for aarch64

#if defined(__linux__)
#include <signal.h>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>
#endif

#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"

static const size_t kUnknownFrameSize = 0;
// Stack end to use when we don't know the actual stack end
// (effectively just the end of address space).
constexpr uintptr_t kUnknownStackEnd =
    std::numeric_limits<size_t>::max() - sizeof(void *);

#if defined(__linux__)
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
static const unsigned char* GetKernelRtSigreturnAddress() {
  constexpr uintptr_t kImpossibleAddress = 1;
  ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
  uintptr_t address = memoized.load(std::memory_order_relaxed);
  if (address != kImpossibleAddress) {
    return reinterpret_cast<const unsigned char*>(address);
  }

  address = reinterpret_cast<uintptr_t>(nullptr);

#ifdef ABSL_HAVE_VDSO_SUPPORT
  absl::debugging_internal::VDSOSupport vdso;
  if (vdso.IsPresent()) {
    absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
    auto lookup = [&](int type) {
      return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
                               &symbol_info);
    };
    if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
        symbol_info.address == nullptr) {
      // Unexpected: VDSO is present, yet the expected symbol is missing
      // or null.
      assert(false && "VDSO is present, but doesn't have expected symbol");
    } else {
      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
          kImpossibleAddress) {
        address = reinterpret_cast<uintptr_t>(symbol_info.address);
      } else {
        assert(false && "VDSO returned invalid address");
      }
    }
  }
#endif

  memoized.store(address, std::memory_order_relaxed);
  return reinterpret_cast<const unsigned char*>(address);
}
#endif  // __linux__

// Compute the size of a stack frame in [low..high).  We assume that
// low < high; if not, kUnknownFrameSize is returned.
template<typename T>
static size_t ComputeStackFrameSize(const T* low,
                                    const T* high) {
  const char* low_char_ptr = reinterpret_cast<const char *>(low);
  const char* high_char_ptr = reinterpret_cast<const char *>(high);
  return low < high ? static_cast<size_t>(high_char_ptr - low_char_ptr)
                    : kUnknownFrameSize;
}

// Saves stack info that is expensive to calculate to avoid recalculating it
// for each frame.
struct StackInfo {
  uintptr_t stack_low;
  uintptr_t stack_high;
  uintptr_t sig_stack_low;
  uintptr_t sig_stack_high;
};

static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
  uintptr_t comparable_ptr = reinterpret_cast<uintptr_t>(ptr);
  return (comparable_ptr >= stack_info->sig_stack_low &&
          comparable_ptr < stack_info->sig_stack_high);
}
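
// Background note (an illustrative sketch of the standard AArch64 frame
// record convention, not code from this file): a function built with frame
// pointers typically begins with something along the lines of
//
//   stp x29, x30, [sp, #-frame_size]!  // save caller's fp (x29) and lr (x30)
//   mov x29, sp                        // x29 now points at the frame record
//
// so for a frame pointer fp, fp[0] holds the caller's frame pointer and
// fp[1] holds the return address into the caller.  The unwinder below relies
// on this layout.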

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_frame_pointer, const void *uc,
                             const StackInfo *stack_info) {
  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);

#if defined(__linux__)
  if (WITH_CONTEXT && uc != nullptr) {
    // Check to see if next frame's return address is __kernel_rt_sigreturn.
    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // old_frame_pointer[0] is not suitable for unwinding; look at the
      // ucontext to discover the frame pointer before the signal.
      void **const pre_signal_frame_pointer =
          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);

      // The most recent signal always needs special handling to find the
      // frame pointer, but a nested signal does not.  If
      // pre_signal_frame_pointer is earlier in the stack than
      // old_frame_pointer, then use it.  If it is later, then we have already
      // unwound through it and it needs no special handling.
      if (pre_signal_frame_pointer >= old_frame_pointer) {
        new_frame_pointer = pre_signal_frame_pointer;
      }
      // Check that the alleged frame pointer is actually readable.  This is
      // to prevent a "double fault" in case we hit the first fault due to
      // e.g. stack corruption.
      if (!absl::debugging_internal::AddressIsReadable(
              new_frame_pointer))
        return nullptr;
    }
  }
#endif

  // The frame pointer should be 8-byte aligned.
  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
    return nullptr;

  // Only check the size if both frames are in the same stack.
  if (InsideSignalStack(new_frame_pointer, stack_info) ==
      InsideSignalStack(old_frame_pointer, stack_info)) {
    // Check frame size.  In strict mode, we assume frames to be under
    // 100,000 bytes.  In non-strict mode, we relax the limit to 1MB.
    const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
    const size_t frame_size =
        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
    if (frame_size == kUnknownFrameSize)
      return nullptr;
    // A very large frame may mean corrupt memory or an erroneous frame
    // pointer.  But also maybe just a plain-old large frame.  Assume that if
    // the frame is within a known stack, then it is valid.
    if (frame_size > max_size) {
      size_t stack_low = stack_info->stack_low;
      size_t stack_high = stack_info->stack_high;
      if (InsideSignalStack(new_frame_pointer, stack_info)) {
        stack_low = stack_info->sig_stack_low;
        stack_high = stack_info->sig_stack_high;
      }
      if (stack_high < kUnknownStackEnd &&
          static_cast<size_t>(getpagesize()) < stack_low) {
        const uintptr_t new_fp_u =
            reinterpret_cast<uintptr_t>(new_frame_pointer);
        // Stack bounds are known.
        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
          // new_frame_pointer is not within a known stack.
          return nullptr;
        }
      } else {
        // Stack bounds are unknown, prefer truncated stack to possible crash.
        return nullptr;
      }
    }
  }

  return new_frame_pointer;
}
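
// UnwindImpl below walks the chain of frame records starting from its own
// frame.  Summarizing the code (no separate documentation is assumed):
// IS_STACK_FRAMES controls whether the per-frame sizes[] array is also
// filled in, and IS_WITH_CONTEXT controls whether ucp carries a signal
// ucontext_t that NextStackFrame may consult.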
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
// We count on the bottom frame being this one. See the comment
// at prev_return_address, below.
ABSL_ATTRIBUTE_NOINLINE
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
#ifdef __GNUC__
  void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
# error reading stack pointer not yet supported on this platform
#endif
  skip_count++;  // Skip the frame for this function.
  int n = 0;

  // Assume that the first page is not stack.
  StackInfo stack_info;
  stack_info.stack_low = static_cast<uintptr_t>(getpagesize());
  stack_info.stack_high = kUnknownStackEnd;
  stack_info.sig_stack_low = stack_info.stack_low;
  stack_info.sig_stack_high = kUnknownStackEnd;

  // The frame pointer points to the low address of a frame.  The first
  // 64-bit word of a frame points to the next frame up the call chain, which
  // normally is just after the high address of the current frame.  The second
  // word of a frame contains the return address into the caller.  To find a
  // pc value associated with the current frame, we need to go down a level in
  // the call chain.  So we remember the return address of the last frame
  // seen.  This does not work for the first stack frame, which belongs to
  // UnwindImpl() itself, but we skip that frame anyway.
  void* prev_return_address = nullptr;
  // The nth frame size is the difference between the nth frame pointer and
  // the frame pointer below it in the call chain.  There is no frame below
  // the leaf frame, but this function is the leaf anyway, and we skip it.
  void** prev_frame_pointer = nullptr;

  while (frame_pointer && n < max_depth) {
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = prev_return_address;
      if (IS_STACK_FRAMES) {
        sizes[n] = static_cast<int>(
            ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
      }
      n++;
    }
    prev_return_address = frame_pointer[1];
    prev_frame_pointer = frame_pointer;
    // The absl::GetStackFrames routine is called when we are in some
    // informational context (the failure signal handler for example).
    // Use the non-strict unwinding rules to produce a stack trace
    // that is as complete as possible (even if it contains a few bogus
    // entries in some rare cases).
    frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
        frame_pointer, ucp, &stack_info);
  }

  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 200;
    int num_dropped_frames = 0;
    for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
      if (skip_count > 0) {
        skip_count--;
      } else {
        num_dropped_frames++;
      }
      frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
          frame_pointer, ucp, &stack_info);
    }
    *min_dropped_frames = num_dropped_frames;
  }
  return n;
}
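
// Usage sketch (illustrative caller, not part of this file): the public
// entry points declared in absl/debugging/stacktrace.h are implemented by
// selecting a platform-specific -inl file such as this one and forwarding to
// UnwindImpl, so client code just calls the public API, e.g.:
//
//   void* frames[32];
//   int depth = absl::GetStackTrace(frames, 32, /*skip_count=*/1);
//
// which fills frames[0..depth) with return addresses for the current thread.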

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_