// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/stack_copier_signal.h"

#include <linux/futex.h>
#include <signal.h>
#include <sys/ucontext.h>
#include <syscall.h>

#include <atomic>
#include <cstring>

#include "base/memory/raw_ptr.h"
#include "base/notreached.h"
#include "base/profiler/register_context.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/suspendable_thread_delegate.h"
#include "base/time/time_override.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

namespace base {

namespace {

// Waitable event implementation with futex and without DCHECK(s), since signal
// handlers cannot allocate memory or use pthread api.
class AsyncSafeWaitableEvent {
 public:
  AsyncSafeWaitableEvent() { futex_.store(0, std::memory_order_release); }
  ~AsyncSafeWaitableEvent() {}

  bool Wait() {
    // futex() can wake up spuriously if this memory address was previously used
    // for a pthread mutex. So, also check the condition.
    while (true) {
      long res =
          syscall(SYS_futex, futex_int_ptr(), FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
                  0, nullptr, nullptr, 0);
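      // res is 0 on a wakeup, or -1 with errno set to EAGAIN if the futex word
      // was already non-zero when the wait started, or EINTR if the wait was
      // interrupted; the acquire load below distinguishes a genuine Signal()
      // from a failure in either case.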
      if (futex_.load(std::memory_order_acquire) != 0)
        return true;
      if (res != 0)
        return false;
    }
  }

  void Signal() {
    futex_.store(1, std::memory_order_release);
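    // Wake at most one thread blocked in Wait() on this futex word.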
    syscall(SYS_futex, futex_int_ptr(), FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1,
            nullptr, nullptr, 0);
  }

 private:
  // Provides a pointer to the atomic's storage. std::atomic_int has standard
  // layout so its address can be used for the pointer as long as it only
  // contains the int.
  int* futex_int_ptr() {
    static_assert(sizeof(futex_) == sizeof(int),
                  "Expected std::atomic_int to be the same size as int");
    return reinterpret_cast<int*>(&futex_);
  }

  std::atomic_int futex_{0};
};

// Scoped signal event that calls Signal on the AsyncSafeWaitableEvent on
// destruction.
class ScopedEventSignaller {
 public:
  ScopedEventSignaller(AsyncSafeWaitableEvent* event) : event_(event) {}
  ~ScopedEventSignaller() { event_->Signal(); }

 private:
  raw_ptr<AsyncSafeWaitableEvent> event_;
};

// Struct to store the arguments to the signal handler.
struct HandlerParams {
  uintptr_t stack_base_address;

  // The event is signalled when the signal handler is done executing.
  raw_ptr<AsyncSafeWaitableEvent> event;

  // Return values:

  // Successfully copied the stack segment.
  raw_ptr<bool> success;

  // The thread context of the leaf function.
  raw_ptr<mcontext_t> context;

  // Buffer to copy the stack segment.
  raw_ptr<StackBuffer> stack_buffer;
  raw_ptr<const uint8_t*> stack_copy_bottom;

  // The timestamp when the stack was copied.
  raw_ptr<absl::optional<TimeTicks>> maybe_timestamp;

  // The delegate provided to the StackCopier.
  raw_ptr<StackCopier::Delegate> stack_copier_delegate;
};

// Pointer to the parameters to be "passed" to the CopyStackSignalHandler() from
// the sampling thread to the sampled (stopped) thread. This value is set just
// before sending the signal to the thread and reset when the handler is done.
std::atomic<HandlerParams*> g_handler_params;

// CopyStackSignalHandler is invoked on the stopped thread and records the
// thread's stack and register context at the time the signal was received. This
// function may only call reentrant code.
void CopyStackSignalHandler(int n, siginfo_t* siginfo, void* sigcontext) {
  HandlerParams* params = g_handler_params.load(std::memory_order_acquire);

  // MaybeTimeTicksNowIgnoringOverride() is implemented in terms of
  // clock_gettime on Linux, which is signal safe per the signal-safety(7) man
  // page, but is not guaranteed to succeed, in which case absl::nullopt is
  // returned. TimeTicks::Now() can't be used because it expects clock_gettime
  // to always succeed and is thus not signal-safe.
  *params->maybe_timestamp = subtle::MaybeTimeTicksNowIgnoringOverride();

  ScopedEventSignaller e(params->event);
  *params->success = false;

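  // With SA_SIGINFO set, the third handler argument is the ucontext_t of the
  // interrupted thread; its uc_mcontext member holds the register state at the
  // point the signal was delivered.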
  const ucontext_t* ucontext = static_cast<ucontext_t*>(sigcontext);
  std::memcpy(params->context, &ucontext->uc_mcontext, sizeof(mcontext_t));

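  // The stack grows downward, so the interrupted thread's stack pointer is the
  // lowest live address (the bottom) and the stack base address is the highest
  // (the top).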
  const uintptr_t bottom = RegisterContextStackPointer(params->context);
  const uintptr_t top = params->stack_base_address;
  if ((top - bottom) > params->stack_buffer->size()) {
    // The stack exceeds the size of the allocated buffer. The buffer is sized
    // such that this shouldn't happen under typical execution so we can safely
    // punt in this situation.
    return;
  }

  params->stack_copier_delegate->OnStackCopy();

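  // Copy the stack contents into the buffer, rewriting in-stack pointers so
  // they refer to the corresponding locations within the copy.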
  *params->stack_copy_bottom =
      StackCopierSignal::CopyStackContentsAndRewritePointers(
          reinterpret_cast<uint8_t*>(bottom), reinterpret_cast<uintptr_t*>(top),
          StackBuffer::kPlatformStackAlignment, params->stack_buffer->buffer());

  *params->success = true;
}

// Sets the global handler params for the signal handler function.
class ScopedSetSignalHandlerParams {
 public:
  ScopedSetSignalHandlerParams(HandlerParams* params) {
    g_handler_params.store(params, std::memory_order_release);
  }

  ~ScopedSetSignalHandlerParams() {
    g_handler_params.store(nullptr, std::memory_order_release);
  }
};

class ScopedSigaction {
 public:
  ScopedSigaction(int signal,
                  struct sigaction* action,
                  struct sigaction* original_action)
      : signal_(signal),
        action_(action),
        original_action_(original_action),
        succeeded_(sigaction(signal, action, original_action) == 0) {}

  bool succeeded() const { return succeeded_; }

  ~ScopedSigaction() {
    if (!succeeded_)
      return;

    bool reset_succeeded = sigaction(signal_, original_action_, action_) == 0;
    DCHECK(reset_succeeded);
  }

 private:
  const int signal_;
  const raw_ptr<struct sigaction> action_;
  const raw_ptr<struct sigaction> original_action_;
  const bool succeeded_;
};

}  // namespace

StackCopierSignal::StackCopierSignal(
    std::unique_ptr<ThreadDelegate> thread_delegate)
    : thread_delegate_(std::move(thread_delegate)) {}

StackCopierSignal::~StackCopierSignal() = default;

bool StackCopierSignal::CopyStack(StackBuffer* stack_buffer,
                                  uintptr_t* stack_top,
                                  TimeTicks* timestamp,
                                  RegisterContext* thread_context,
                                  Delegate* delegate) {
  AsyncSafeWaitableEvent wait_event;
  bool copied = false;
  const uint8_t* stack_copy_bottom = nullptr;
  const uintptr_t stack_base_address = thread_delegate_->GetStackBaseAddress();
  absl::optional<TimeTicks> maybe_timestamp;
  HandlerParams params = {stack_base_address, &wait_event, &copied,
                          thread_context, stack_buffer, &stack_copy_bottom,
                          &maybe_timestamp, delegate};
  {
    ScopedSetSignalHandlerParams scoped_handler_params(&params);

    // Set the signal handler for the thread to the stack copy function.
    struct sigaction action;
    struct sigaction original_action;
    memset(&action, 0, sizeof(action));
    action.sa_sigaction = CopyStackSignalHandler;
    action.sa_flags = SA_RESTART | SA_SIGINFO;
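    // SA_SIGINFO is what makes the ucontext_t (and thus the register state)
    // available to the handler; SA_RESTART lets syscalls interrupted in the
    // sampled thread be restarted rather than fail with EINTR.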
    sigemptyset(&action.sa_mask);
    TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
                       "StackCopierSignal copy stack");
    // SIGURG is chosen here because we observe no crashes with this signal and
    // neither Chrome nor AOSP sets up a special handler for this signal.
    ScopedSigaction scoped_sigaction(SIGURG, &action, &original_action);
    if (!scoped_sigaction.succeeded())
      return false;

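    // tgkill() delivers SIGURG to the specific sampled thread, rather than to
    // an arbitrary thread in the process as kill() would.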
    if (syscall(SYS_tgkill, getpid(), thread_delegate_->GetThreadId(),
                SIGURG) != 0) {
      NOTREACHED();
      return false;
    }
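    // Wait for the handler on the sampled thread to finish (or bail out); it
    // signals the async-safe event when done.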
    bool finished_waiting = wait_event.Wait();
    TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
                     "StackCopierSignal copy stack");
    if (!finished_waiting) {
      NOTREACHED();
      return false;
    }
    // Ideally, an accurate timestamp is captured while the sampled thread is
    // paused. In rare cases, this may fail, in which case we resort to
    // capturing a delayed timestamp here instead.
    if (maybe_timestamp.has_value()) {
      *timestamp = maybe_timestamp.value();
    } else {
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
                   "Fallback on TimeTicks::Now()");
      *timestamp = TimeTicks::Now();
    }
  }

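  // Registers that may hold pointers into the original stack (which registers
  // is platform-specific) are rewritten to point into the copy so the unwinder
  // can follow them within the copied stack.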
  const uintptr_t bottom = RegisterContextStackPointer(params.context);
  for (uintptr_t* reg :
       thread_delegate_->GetRegistersToRewrite(thread_context)) {
    *reg = StackCopierSignal::RewritePointerIfInOriginalStack(
        reinterpret_cast<uint8_t*>(bottom),
        reinterpret_cast<uintptr_t*>(stack_base_address), stack_copy_bottom,
        *reg);
  }

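  // The returned stack top is the address within the copy that corresponds to
  // the original stack base address.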
  *stack_top = reinterpret_cast<uintptr_t>(stack_copy_bottom) +
               (stack_base_address - bottom);

  return copied;
}

}  // namespace base