// Copyright 2021 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_

// Generates stack traces for RISC-V.

#include <sys/ucontext.h>

#include "absl/base/config.h"
#if defined(__linux__)
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>
#endif

#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>

#include "absl/base/attributes.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"
#include "absl/debugging/stacktrace.h"

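// Sentinel value returned by ComputeStackFrameSize() below when the frame
// size cannot be determined (i.e. when `low >= high`).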
static const uintptr_t kUnknownFrameSize = 0;

#if defined(__linux__)
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
static const unsigned char *GetKernelRtSigreturnAddress() {
  constexpr uintptr_t kImpossibleAddress = 0;
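  // Memoize the result: the VDSO mapping does not change for the lifetime of
  // the process, so a relaxed atomic is sufficient here (the worst case is a
  // redundant lookup).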
  ABSL_CONST_INIT static std::atomic<uintptr_t> memoized(kImpossibleAddress);
  uintptr_t address = memoized.load(std::memory_order_relaxed);
  if (address != kImpossibleAddress) {
    return reinterpret_cast<const unsigned char *>(address);
  }

  address = reinterpret_cast<uintptr_t>(nullptr);

#if ABSL_HAVE_VDSO_SUPPORT
  absl::debugging_internal::VDSOSupport vdso;
  if (vdso.IsPresent()) {
    absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
    // Symbol versioning pulled from arch/riscv/kernel/vdso/vdso.lds at v5.10.
    auto lookup = [&](int type) {
      return vdso.LookupSymbol("__vdso_rt_sigreturn", "LINUX_4.15", type,
                               &symbol_info);
    };
    if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
        symbol_info.address == nullptr) {
      // Unexpected: VDSO is present, yet the expected symbol is missing or
      // null.
      assert(false && "VDSO is present, but doesn't have expected symbol");
    } else {
      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
          kImpossibleAddress) {
        address = reinterpret_cast<uintptr_t>(symbol_info.address);
      } else {
        assert(false && "VDSO returned invalid address");
      }
    }
  }
#endif

  memoized.store(address, std::memory_order_relaxed);
  return reinterpret_cast<const unsigned char *>(address);
}
#endif  // __linux__

// Computes the size of the stack frame in [low..high), assuming low < high.
// Returns kUnknownFrameSize if this assumption does not hold.
template <typename T>
static inline uintptr_t ComputeStackFrameSize(const T *low, const T *high) {
  const char *low_char_ptr = reinterpret_cast<const char *>(low);
  const char *high_char_ptr = reinterpret_cast<const char *>(high);
  return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
}

// Given a pointer to a stack frame, locate and return the calling stack frame,
// or return null if no stack frame can be found. Perform sanity checks (the
// strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
  //               .
  //               .
  //               .
  //   +-> +----------------+
  //   |   | return address |
  //   |   |   previous fp  |
  //   |   |      ...       |
  //   |   +----------------+ <-+
  //   |   | return address |   |
  //   +---|-  previous fp  |   |
  //       |      ...       |   |
  // $fp ->|----------------+   |
  //       | return address |   |
  //       |   previous fp -|---+
  // $sp ->|      ...       |
  //       +----------------+
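  // Per the frame layout above, the caller's frame pointer is stored at
  // old_frame_pointer[-2] and the return address at old_frame_pointer[-1].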
  void **new_frame_pointer = reinterpret_cast<void **>(old_frame_pointer[-2]);
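  // By default, sanity-check the size of the frame we are about to step over;
  // the check is skipped below when unwinding through a signal frame.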
  bool check_frame_size = true;

#if defined(__linux__)
  if (WITH_CONTEXT && uc != nullptr) {
    // Check to see if next frame's return address is __kernel_rt_sigreturn.
    if (old_frame_pointer[-1] == GetKernelRtSigreturnAddress()) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // old_frame_pointer is not suitable for unwinding; look at the ucontext
      // to discover the frame pointer that was live before the signal.
      //
      // RISCV ELF psABI has the frame pointer at x8/fp/s0.
      // -- RISCV psABI Table 18.2
      void **const pre_signal_frame_pointer =
          reinterpret_cast<void **>(ucv->uc_mcontext.__gregs[8]);

      // Check that the alleged frame pointer is actually readable. This is to
      // prevent a "double fault" in case we hit the first fault due to stack
      // corruption.
      if (!absl::debugging_internal::AddressIsReadable(
              pre_signal_frame_pointer))
        return nullptr;

      // The alleged frame pointer is readable; use it for further unwinding.
      new_frame_pointer = pre_signal_frame_pointer;

      // Skip the frame size check if we are returning from a signal; we may
      // be using an alternate stack for signals.
      check_frame_size = false;
    }
  }
#endif

  // The RISCV ELF psABI mandates that the stack pointer is always 16-byte
  // aligned.
  // FIXME(abdulras) this doesn't hold for ILP32E which only mandates a 4-byte
  // alignment.
  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
    return nullptr;

  // Check frame size.  In strict mode, we assume frames to be under 100,000
  // bytes.  In non-strict mode, we relax the limit to 1MB.
  if (check_frame_size) {
    const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
    const uintptr_t frame_size =
        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
    if (frame_size == kUnknownFrameSize || frame_size > max_size)
      return nullptr;
  }

  return new_frame_pointer;
}

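// Unwinds the stack starting from this function's own frame: skips
// `skip_count` frames, then records up to `max_depth` return addresses in
// `result` (and, when IS_STACK_FRAMES is set, the matching frame sizes in
// `sizes`), returning the number of frames recorded.  If `min_dropped_frames`
// is non-null, it receives a lower bound on the number of frames that did not
// fit.
//
// Callers normally reach this through the public API declared in
// absl/debugging/stacktrace.h; as an illustrative sketch (not part of this
// file):
//
//   void *frames[32];
//   int depth = absl::GetStackTrace(frames, 32, /*skip_count=*/1);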
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
  // The `frame_pointer` that is computed here points to the top of the frame.
  // The two words preceding the address are the return address and the
  // previous frame pointer.
#if defined(__GNUC__)
  void **frame_pointer = reinterpret_cast<void **>(__builtin_frame_address(0));
#else
#error reading the frame pointer is not yet supported on this platform
#endif

  int n = 0;
  void *return_address = nullptr;
  while (frame_pointer && n < max_depth) {
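    // frame_pointer[-1] holds the return address for the current frame (see
    // the frame layout in NextStackFrame()).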
    return_address = frame_pointer[-1];

    // The absl::GetStackFrames routine is called when we are in some
    // informational context (the failure signal handler for example).  Use the
    // non-strict unwinding rules to produce a stack trace that is as complete
    // as possible (even if it contains a few bogus entries in some rare cases).
    void **next_frame_pointer =
        NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);

    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = return_address;
      if (IS_STACK_FRAMES) {
        sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
      }
      n++;
    }

    frame_pointer = next_frame_pointer;
  }

  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 200;
    int num_dropped_frames = 0;
    for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
      if (skip_count > 0) {
        skip_count--;
      } else {
        num_dropped_frames++;
      }
      frame_pointer =
          NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
    }
    *min_dropped_frames = num_dropped_frames;
  }

  return n;
}

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
bool StackTraceWorksForTest() { return true; }
}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_