#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_

// Generate stack tracer for aarch64

#if defined(__linux__)
#include <signal.h>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>
#endif

#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"

static const size_t kUnknownFrameSize = 0;
// Stack end to use when we don't know the actual stack end
// (effectively just the end of address space).
constexpr uintptr_t kUnknownStackEnd =
    std::numeric_limits<size_t>::max() - sizeof(void *);

#if defined(__linux__)
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
static const unsigned char* GetKernelRtSigreturnAddress() {
  constexpr uintptr_t kImpossibleAddress = 1;
  ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
  uintptr_t address = memoized.load(std::memory_order_relaxed);
  if (address != kImpossibleAddress) {
    return reinterpret_cast<const unsigned char*>(address);
  }

  address = reinterpret_cast<uintptr_t>(nullptr);

#ifdef ABSL_HAVE_VDSO_SUPPORT
  absl::debugging_internal::VDSOSupport vdso;
  if (vdso.IsPresent()) {
    absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
    auto lookup = [&](int type) {
      return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
                               &symbol_info);
    };
    if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
        symbol_info.address == nullptr) {
      // Unexpected: VDSO is present, yet the expected symbol is missing
      // or null.
      assert(false && "VDSO is present, but doesn't have expected symbol");
    } else {
      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
          kImpossibleAddress) {
        address = reinterpret_cast<uintptr_t>(symbol_info.address);
      } else {
        assert(false && "VDSO returned invalid address");
      }
    }
  }
#endif

  memoized.store(address, std::memory_order_relaxed);
  return reinterpret_cast<const unsigned char*>(address);
}
#endif  // __linux__

// Compute the size of a stack frame in [low..high).  We assume that
// low < high.  Return kUnknownFrameSize if low >= high.
template<typename T>
static size_t ComputeStackFrameSize(const T* low,
                                    const T* high) {
  const char* low_char_ptr = reinterpret_cast<const char *>(low);
  const char* high_char_ptr = reinterpret_cast<const char *>(high);
  return low < high ? static_cast<size_t>(high_char_ptr - low_char_ptr)
                    : kUnknownFrameSize;
}

// Saves stack info that is expensive to calculate to avoid recalculating
// per frame.
struct StackInfo {
  uintptr_t stack_low;
  uintptr_t stack_high;
  uintptr_t sig_stack_low;
  uintptr_t sig_stack_high;
};

static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
  uintptr_t comparable_ptr = reinterpret_cast<uintptr_t>(ptr);
  if (stack_info->sig_stack_high == kUnknownStackEnd)
    return false;
  return (comparable_ptr >= stack_info->sig_stack_low &&
          comparable_ptr < stack_info->sig_stack_high);
}

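// AAPCS64 frame record layout relied on by NextStackFrame() below: the frame
// pointer (x29) points at a two-word record in which word 0 holds the
// caller's frame pointer and word 1 holds the saved return address (x30).
// Unwinding therefore follows *frame_pointer to reach the next record and
// reads frame_pointer[1] to recover a PC in the caller.
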
// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_frame_pointer, const void *uc,
                             const StackInfo *stack_info) {
  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);

#if defined(__linux__)
  if (WITH_CONTEXT && uc != nullptr) {
    // Check to see if next frame's return address is __kernel_rt_sigreturn.
    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // old_frame_pointer[0] is not suitable for unwinding; look at the
      // ucontext to discover the frame pointer before the signal.
      void **const pre_signal_frame_pointer =
          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);

      // The most recent signal always needs special handling to find the frame
      // pointer, but a nested signal does not. If pre_signal_frame_pointer is
      // earlier in the stack than the old_frame_pointer, then use it. If it is
      // later, then we have already unwound through it and it needs no special
      // handling.
      if (pre_signal_frame_pointer >= old_frame_pointer) {
        new_frame_pointer = pre_signal_frame_pointer;
      }
    }
  }
#endif

  // The frame pointer should be 8-byte aligned.
  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
    return nullptr;

  // Check that the alleged frame pointer is actually readable. This is to
  // prevent "double fault" in case we hit the first fault due to e.g.
  // stack corruption.
  if (!absl::debugging_internal::AddressIsReadable(new_frame_pointer))
    return nullptr;

  // Only check the size if both frames are in the same stack.
  if (InsideSignalStack(new_frame_pointer, stack_info) ==
      InsideSignalStack(old_frame_pointer, stack_info)) {
    // Check frame size.  In strict mode, we assume frames to be under
    // 100,000 bytes.  In non-strict mode, we relax the limit to 1MB.
    const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
    const size_t frame_size =
        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
    if (frame_size == kUnknownFrameSize)
      return nullptr;
    // A very large frame may mean corrupt memory or an erroneous frame
    // pointer. But it may also just be a plain old large frame. Assume that
    // if the frame is within a known stack, then it is valid.
    if (frame_size > max_size) {
      size_t stack_low = stack_info->stack_low;
      size_t stack_high = stack_info->stack_high;
      if (InsideSignalStack(new_frame_pointer, stack_info)) {
        stack_low = stack_info->sig_stack_low;
        stack_high = stack_info->sig_stack_high;
      }
      if (stack_high < kUnknownStackEnd &&
          static_cast<size_t>(getpagesize()) < stack_low) {
        const uintptr_t new_fp_u =
            reinterpret_cast<uintptr_t>(new_frame_pointer);
        // Stack bounds are known.
        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
          // new_frame_pointer is not within a known stack.
          return nullptr;
        }
      } else {
        // Stack bounds are unknown; prefer a truncated stack to a possible
        // crash.
        return nullptr;
      }
    }
  }

  return new_frame_pointer;
}

// When PAC-RET (-mbranch-protection=pac-ret) is enabled, return addresses
// stored on the stack will be signed, which means that pointer bits outside of
// the VA range are potentially set. Since the stacktrace code is expected to
// return normal code pointers, this function clears those bits.
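//
// For example (illustrative values only, assuming 48-bit virtual addresses):
// a PAC-signed return address might look like 0x002d0000004008a4, with the
// authentication code held in the upper, otherwise-unused bits; stripping
// the PAC yields the plain code pointer 0x00000000004008a4.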
inline void* ClearPacBits(void* ptr) {
  register void* x30 __asm__("x30") = ptr;
  // The normal instruction for clearing PAC bits is XPACI, but for
  // compatibility with ARM platforms that do not support pointer
  // authentication, we use the hint space instruction XPACLRI instead. Hint
  // space instructions behave as NOPs on unsupported platforms.
  asm("xpaclri" : "+r"(x30));
  return x30;
}

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
// We count on the bottom frame being this one. See the comment
// at prev_return_address.
ABSL_ATTRIBUTE_NOINLINE
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
#ifdef __GNUC__
  void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
# error reading the stack pointer is not yet supported on this platform.
#endif
  skip_count++;  // Skip the frame for this function.
  int n = 0;

  // Assume that the first page is not stack.
  StackInfo stack_info;
  stack_info.stack_low = static_cast<uintptr_t>(getpagesize());
  stack_info.stack_high = kUnknownStackEnd;
  stack_info.sig_stack_low = stack_info.stack_low;
  stack_info.sig_stack_high = kUnknownStackEnd;

  // The frame pointer points to the low address of a frame. The first 64-bit
  // word of a frame points to the next frame up the call chain, which normally
  // is just after the high address of the current frame. The second word of
  // a frame contains the return address into the caller. To find a pc value
  // associated with the current frame, we need to go down a level in the call
  // chain. So we remember the return address of the last frame seen. This
  // does not work for the first stack frame, which belongs to UnwindImpl(),
  // but we skip the frame for UnwindImpl() anyway.
  void* prev_return_address = nullptr;
  // The nth frame size is the difference between the nth frame pointer and
  // the frame pointer below it in the call chain. There is no frame below the
  // leaf frame, but this function is the leaf anyway, and we skip it.
  void** prev_frame_pointer = nullptr;

  while (frame_pointer && n < max_depth) {
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = ClearPacBits(prev_return_address);
      if (IS_STACK_FRAMES) {
        sizes[n] = static_cast<int>(
            ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
      }
      n++;
    }
    prev_return_address = frame_pointer[1];
    prev_frame_pointer = frame_pointer;
    // The absl::GetStackFrames routine is called when we are in some
    // informational context (the failure signal handler for example).
    // Use the non-strict unwinding rules to produce a stack trace
    // that is as complete as possible (even if it contains a few bogus
    // entries in some rare cases).
    frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
        frame_pointer, ucp, &stack_info);
  }

  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 200;
    int num_dropped_frames = 0;
    for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
      if (skip_count > 0) {
        skip_count--;
      } else {
        num_dropped_frames++;
      }
      frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
          frame_pointer, ucp, &stack_info);
    }
    *min_dropped_frames = num_dropped_frames;
  }
  return n;
}

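// Minimal usage sketch (assuming the public entry points declared in
// absl/debugging/stacktrace.h, which dispatch to UnwindImpl() above):
//
//   void* frames[32];
//   // With skip_count == 0, frames[0] is a PC inside the function that
//   // called absl::GetStackTrace().
//   int depth = absl::GetStackTrace(frames, 32, /*skip_count=*/0);
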
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_