• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/libsampler/sampler.h"
6 
7 #ifdef USE_SIGNALS
8 
9 #include <errno.h>
10 #include <pthread.h>
11 #include <signal.h>
12 #include <sys/time.h>
13 #include <atomic>
14 
15 #if !V8_OS_QNX && !V8_OS_AIX
16 #include <sys/syscall.h>  // NOLINT
17 #endif
18 
19 #if V8_OS_MACOSX
20 #include <mach/mach.h>
21 // OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
22 // and is a typedef for struct sigcontext. There is no uc_mcontext.
23 #elif !V8_OS_OPENBSD
24 #include <ucontext.h>
25 #endif
26 
27 #include <unistd.h>
28 
29 #elif V8_OS_WIN || V8_OS_CYGWIN
30 
31 #include "src/base/win32-headers.h"
32 
33 #elif V8_OS_FUCHSIA
34 
35 #include <zircon/process.h>
36 #include <zircon/syscalls.h>
37 #include <zircon/syscalls/debug.h>
38 #include <zircon/types.h>
39 
40 // TODO(wez): Remove this once the Fuchsia SDK has rolled.
41 #if defined(ZX_THREAD_STATE_REGSET0)
42 #define ZX_THREAD_STATE_GENERAL_REGS ZX_THREAD_STATE_REGSET0
// Compatibility shim for pre-roll Fuchsia SDKs: adapts the newer
// 4-argument zx_thread_read_state() signature to the SDK-declared
// 5-argument overload (the extra out-length parameter is discarded).
zx_status_t zx_thread_read_state(zx_handle_t h, uint32_t k, void* b, size_t l) {
  uint32_t dummy_out_len = 0;  // Required by the old API; value is unused.
  // Overload resolution picks the SDK's 5-argument declaration here, not
  // this wrapper, so this is not a recursive call.
  return zx_thread_read_state(h, k, b, static_cast<uint32_t>(l),
                              &dummy_out_len);
}
48 #if defined(__x86_64__)
49 using zx_thread_state_general_regs_t = zx_x86_64_general_regs_t;
50 #else
51 using zx_thread_state_general_regs_t = zx_arm64_general_regs_t;
52 #endif
53 #endif  // !defined(ZX_THREAD_STATE_GENERAL_REGS)
54 
55 #endif
56 
57 #include <algorithm>
58 #include <vector>
59 
60 #include "src/base/atomic-utils.h"
61 #include "src/base/platform/platform.h"
62 
63 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
64 
65 // Not all versions of Android's C library provide ucontext_t.
66 // Detect this and provide custom but compatible definitions. Note that these
67 // follow the GLibc naming convention to access register values from
68 // mcontext_t.
69 //
70 // See http://code.google.com/p/android/issues/detail?id=34784
71 
#if defined(__arm__)

// ARM: mcontext is the kernel's struct sigcontext (from <signal.h>).
using mcontext_t = struct sigcontext;

struct ucontext_t {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};

#elif defined(__aarch64__)

// AArch64: mcontext is the kernel's struct sigcontext (from <signal.h>).
using mcontext_t = struct sigcontext;

struct ucontext_t {
  uint64_t uc_flags;
  struct ucontext *uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};

#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
struct mcontext_t {
  uint32_t regmask;
  uint32_t status;
  uint64_t pc;
  uint64_t gregs[32];
  uint64_t fpregs[32];
  uint32_t acx;
  uint32_t fpc_csr;
  uint32_t fpc_eir;
  uint32_t used_math;
  uint32_t dsp;
  uint64_t mdhi;
  uint64_t mdlo;
  uint32_t hi1;
  uint32_t lo1;
  uint32_t hi2;
  uint32_t lo2;
  uint32_t hi3;
  uint32_t lo3;
};

struct ucontext_t {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};

#elif defined(__i386__)
// x86 version for Android.
struct mcontext_t {
  uint32_t gregs[19];
  void* fpregs;
  uint32_t oldmask;
  uint32_t cr2;
};

using kernel_sigset_t = uint32_t[2];  // x86 kernel uses 64-bit signal masks
struct ucontext_t {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};
// Indices into mcontext_t::gregs, following the GLibc naming convention
// (see the comment at the top of this compatibility section).
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };

#elif defined(__x86_64__)
// x64 version for Android.
struct mcontext_t {
  uint64_t gregs[23];
  void* fpregs;
  uint64_t __reserved1[8];
};

struct ucontext_t {
  uint64_t uc_flags;
  struct ucontext *uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};
// Indices into mcontext_t::gregs, following the GLibc naming convention.
enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif
163 
164 #endif  // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
165 
166 
167 namespace v8 {
168 namespace sampler {
169 
170 #if defined(USE_SIGNALS)
171 
AtomicGuard(AtomicMutex * atomic,bool is_blocking)172 AtomicGuard::AtomicGuard(AtomicMutex* atomic, bool is_blocking)
173     : atomic_(atomic), is_success_(false) {
174   do {
175     bool expected = false;
176     // We have to use the strong version here for the case where is_blocking
177     // is false, and we will only attempt the exchange once.
178     is_success_ = atomic->compare_exchange_strong(expected, true);
179   } while (is_blocking && !is_success_);
180 }
181 
~AtomicGuard()182 AtomicGuard::~AtomicGuard() {
183   if (!is_success_) return;
184   atomic_->store(false);
185 }
186 
is_success() const187 bool AtomicGuard::is_success() const { return is_success_; }
188 
// POSIX per-sampler platform state: just the id of the thread the sampler
// was created on, so SIGPROF can later be directed at that thread.
class Sampler::PlatformData {
 public:
  // Captures the calling thread's id; the sampler is constructed on the
  // thread it will profile.
  PlatformData() : vm_tid_(pthread_self()) {}
  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};
197 
AddSampler(Sampler * sampler)198 void SamplerManager::AddSampler(Sampler* sampler) {
199   AtomicGuard atomic_guard(&samplers_access_counter_);
200   DCHECK(sampler->IsActive());
201   pthread_t thread_id = sampler->platform_data()->vm_tid();
202   auto it = sampler_map_.find(thread_id);
203   if (it == sampler_map_.end()) {
204     SamplerList samplers;
205     samplers.push_back(sampler);
206     sampler_map_.emplace(thread_id, std::move(samplers));
207   } else {
208     SamplerList& samplers = it->second;
209     auto it = std::find(samplers.begin(), samplers.end(), sampler);
210     if (it == samplers.end()) samplers.push_back(sampler);
211   }
212 }
213 
RemoveSampler(Sampler * sampler)214 void SamplerManager::RemoveSampler(Sampler* sampler) {
215   AtomicGuard atomic_guard(&samplers_access_counter_);
216   DCHECK(sampler->IsActive());
217   pthread_t thread_id = sampler->platform_data()->vm_tid();
218   auto it = sampler_map_.find(thread_id);
219   DCHECK_NE(it, sampler_map_.end());
220   SamplerList& samplers = it->second;
221   samplers.erase(std::remove(samplers.begin(), samplers.end(), sampler),
222                  samplers.end());
223   if (samplers.empty()) {
224     sampler_map_.erase(it);
225   }
226 }
227 
DoSample(const v8::RegisterState & state)228 void SamplerManager::DoSample(const v8::RegisterState& state) {
229   AtomicGuard atomic_guard(&samplers_access_counter_, false);
230   // TODO(petermarshall): Add stat counters for the bailouts here.
231   if (!atomic_guard.is_success()) return;
232   pthread_t thread_id = pthread_self();
233   auto it = sampler_map_.find(thread_id);
234   if (it == sampler_map_.end()) return;
235   SamplerList& samplers = it->second;
236 
237   for (Sampler* sampler : samplers) {
238     if (!sampler->ShouldRecordSample()) continue;
239     Isolate* isolate = sampler->isolate();
240     // We require a fully initialized and entered isolate.
241     if (isolate == nullptr || !isolate->IsInUse()) continue;
242     sampler->SampleStack(state);
243   }
244 }
245 
// Returns the process-wide SamplerManager singleton. Backed by a
// LeakyObject, i.e. it is never destructed, which avoids
// destruction-order problems at shutdown.
SamplerManager* SamplerManager::instance() {
  static base::LeakyObject<SamplerManager> instance;
  return instance.get();
}
250 
251 #elif V8_OS_WIN || V8_OS_CYGWIN
252 
253 // ----------------------------------------------------------------------------
254 // Win32 profiler support. On Cygwin we use the same sampler implementation as
255 // on Win32.
256 
// Win32/Cygwin per-sampler platform state: an owned handle to the thread
// being profiled, usable from the sampler thread.
class Sampler::PlatformData {
 public:
  // Get a handle to the calling thread. This is the thread that we are
  // going to profile. We need to make a copy of the handle because we are
  // going to use it in the sampler thread. Using GetThreadHandle() will
  // not work in this case. We're using OpenThread because DuplicateHandle
  // for some reason doesn't work in Chrome's sandbox.
  PlatformData()
      : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
                                    THREAD_SUSPEND_RESUME |
                                    THREAD_QUERY_INFORMATION,
                                    false,
                                    GetCurrentThreadId())) {}

  // OpenThread can fail and return nullptr, so only close valid handles.
  ~PlatformData() {
    if (profiled_thread_ != nullptr) {
      CloseHandle(profiled_thread_);
      profiled_thread_ = nullptr;
    }
  }

  HANDLE profiled_thread() { return profiled_thread_; }

 private:
  HANDLE profiled_thread_;
};
283 
284 #elif V8_OS_FUCHSIA
285 
// Fuchsia per-sampler platform state: a duplicated handle to the profiled
// thread so the sampler thread can suspend and inspect it later.
class Sampler::PlatformData {
 public:
  // Duplicates the calling thread's handle. If zx_handle_duplicate fails,
  // profiled_thread_ keeps its ZX_HANDLE_INVALID initializer and
  // DoSample() becomes a no-op.
  PlatformData() {
    zx_handle_duplicate(zx_thread_self(), ZX_RIGHT_SAME_RIGHTS,
                        &profiled_thread_);
  }
  ~PlatformData() {
    if (profiled_thread_ != ZX_HANDLE_INVALID) {
      zx_handle_close(profiled_thread_);
      profiled_thread_ = ZX_HANDLE_INVALID;
    }
  }

  zx_handle_t profiled_thread() { return profiled_thread_; }

 private:
  zx_handle_t profiled_thread_ = ZX_HANDLE_INVALID;
};
304 
305 #endif  // USE_SIGNALS
306 
307 
308 #if defined(USE_SIGNALS)
309 class SignalHandler {
310  public:
IncreaseSamplerCount()311   static void IncreaseSamplerCount() {
312     base::MutexGuard lock_guard(mutex_.Pointer());
313     if (++client_count_ == 1) Install();
314   }
315 
DecreaseSamplerCount()316   static void DecreaseSamplerCount() {
317     base::MutexGuard lock_guard(mutex_.Pointer());
318     if (--client_count_ == 0) Restore();
319   }
320 
Installed()321   static bool Installed() {
322     base::MutexGuard lock_guard(mutex_.Pointer());
323     return signal_handler_installed_;
324   }
325 
326  private:
Install()327   static void Install() {
328     struct sigaction sa;
329     sa.sa_sigaction = &HandleProfilerSignal;
330     sigemptyset(&sa.sa_mask);
331 #if V8_OS_QNX
332     sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
333 #else
334     sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
335 #endif
336     signal_handler_installed_ =
337         (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
338   }
339 
Restore()340   static void Restore() {
341     if (signal_handler_installed_) {
342       sigaction(SIGPROF, &old_signal_handler_, nullptr);
343       signal_handler_installed_ = false;
344     }
345   }
346 
347   static void FillRegisterState(void* context, RegisterState* regs);
348   static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
349 
350   // Protects the process wide state below.
351   static base::LazyMutex mutex_;
352   static int client_count_;
353   static bool signal_handler_installed_;
354   static struct sigaction old_signal_handler_;
355 };
356 
// Definitions of SignalHandler's process-wide static state (declared in
// the class above).
base::LazyMutex SignalHandler::mutex_ = LAZY_MUTEX_INITIALIZER;
int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
361 
362 
// SIGPROF handler. Runs on the sampled thread itself: translates the raw
// signal |context| into a RegisterState and hands it to the SamplerManager.
// Executes in signal-handler context, so everything it calls must stay
// async-signal-safe.
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
                                         void* context) {
  USE(info);
  if (signal != SIGPROF) return;  // Ignore unrelated signals.
  v8::RegisterState state;
  FillRegisterState(context, &state);
  SamplerManager::instance()->DoSample(state);
}
371 
// Translates the OS- and architecture-specific signal |context| into the
// portable RegisterState: program counter, stack pointer, frame pointer,
// and (where the ABI has one) the link register.
void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  // On OpenBSD and on Linux PPC/S390 the registers are reached through
  // ucontext directly, so no mcontext reference is bound there.
#if !(V8_OS_OPENBSD || \
      (V8_OS_LINUX &&  \
       (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390 || V8_HOST_ARCH_PPC64)))
  mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
  // Old GLibc ARM versions used a gregs[] array to access the register
  // values from mcontext_t.
  state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
  state->lr = reinterpret_cast<void*>(mcontext.gregs[R14]);
#else
  state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
  state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
  state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
  state->lr = reinterpret_cast<void*>(mcontext.arm_lr);
#endif  // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
#elif V8_HOST_ARCH_ARM64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.sp);
  // FP is an alias for x29.
  state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
  // LR is an alias for x30.
  state->lr = reinterpret_cast<void*>(mcontext.regs[30]);
#elif V8_HOST_ARCH_MIPS
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_MIPS64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
#if V8_LIBC_GLIBC
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
  state->sp =
      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
  state->fp =
      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
  state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->link);
#else
  // Some C libraries, notably Musl, define the regs member as a void pointer
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[32]);
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[1]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[31]);
  state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[36]);
#endif
#elif V8_HOST_ARCH_S390
#if V8_TARGET_ARCH_32_BIT
  // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
  // mode.  This bit needs to be masked out to resolve actual address.
  state->pc =
      reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
#else
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
#endif  // V8_TARGET_ARCH_32_BIT
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
  state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[14]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_IOS

#if V8_TARGET_ARCH_ARM64
  // Building for the iOS device.
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__pc);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__sp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__fp);
#elif V8_TARGET_ARCH_X64
  // Building for the iOS simulator.
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#else
#error Unexpected iOS target architecture.
#endif  // V8_TARGET_ARCH_ARM64

#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#elif V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
#elif V8_HOST_ARCH_ARM64
  state->pc =
      reinterpret_cast<void*>(arm_thread_state64_get_pc(mcontext->__ss));
  state->sp =
      reinterpret_cast<void*>(arm_thread_state64_get_sp(mcontext->__ss));
  state->fp =
      reinterpret_cast<void*>(arm_thread_state64_get_fp(mcontext->__ss));
#endif  // V8_HOST_ARCH_*
#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_FP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_SOLARIS
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
#elif V8_OS_QNX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_AIX
  state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
  state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
  state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
  state->lr = reinterpret_cast<void*>(mcontext.jmp_context.lr);
#endif  // V8_OS_AIX
}
534 
535 #endif  // USE_SIGNALS
536 
// Binds the sampler to |isolate| and captures platform-specific state for
// the current thread (see Sampler::PlatformData above).
Sampler::Sampler(Isolate* isolate)
    : isolate_(isolate), data_(std::make_unique<PlatformData>()) {}
539 
~Sampler()540 Sampler::~Sampler() {
541   DCHECK(!IsActive());
542 }
543 
// Activates the sampler. On signal-based platforms this additionally
// installs the process-wide SIGPROF handler (first sampler only) and
// registers this sampler with the per-thread list the handler consults.
void Sampler::Start() {
  DCHECK(!IsActive());
  SetActive(true);
#if defined(USE_SIGNALS)
  SignalHandler::IncreaseSamplerCount();
  SamplerManager::instance()->AddSampler(this);
#endif
}
552 
// Deactivates the sampler, mirroring Start() in reverse order: unregister
// from the SamplerManager, drop the SIGPROF handler refcount, then clear
// the active flag.
void Sampler::Stop() {
#if defined(USE_SIGNALS)
  SamplerManager::instance()->RemoveSampler(this);
  SignalHandler::DecreaseSamplerCount();
#endif
  DCHECK(IsActive());
  SetActive(false);
}
561 
562 #if defined(USE_SIGNALS)
563 
// Requests one sample of the profiled thread by sending it SIGPROF; the
// actual register/stack capture happens in the signal handler on that
// thread.
void Sampler::DoSample() {
  // Without an installed handler the signal could not record anything, so
  // don't send it at all.
  if (!SignalHandler::Installed()) return;
  DCHECK(IsActive());
  SetShouldRecordSample();
  pthread_kill(platform_data()->vm_tid(), SIGPROF);
}
570 
571 #elif V8_OS_WIN || V8_OS_CYGWIN
572 
// Samples the profiled thread on Windows/Cygwin: suspend it, read its
// register context, hand pc/sp/fp to SampleStack, then resume it.
void Sampler::DoSample() {
  HANDLE profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == nullptr) return;  // OpenThread failed at setup.

  const DWORD kSuspendFailed = static_cast<DWORD>(-1);
  if (SuspendThread(profiled_thread) == kSuspendFailed) return;

  // Context used for sampling the register state of the profiled thread.
  CONTEXT context;
  memset(&context, 0, sizeof(context));
  context.ContextFlags = CONTEXT_FULL;
  if (GetThreadContext(profiled_thread, &context) != 0) {
    v8::RegisterState state;
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(context.Rip);
    state.sp = reinterpret_cast<void*>(context.Rsp);
    state.fp = reinterpret_cast<void*>(context.Rbp);
#elif V8_HOST_ARCH_ARM64
    state.pc = reinterpret_cast<void*>(context.Pc);
    state.sp = reinterpret_cast<void*>(context.Sp);
    state.fp = reinterpret_cast<void*>(context.Fp);
#else
    state.pc = reinterpret_cast<void*>(context.Eip);
    state.sp = reinterpret_cast<void*>(context.Esp);
    state.fp = reinterpret_cast<void*>(context.Ebp);
#endif
    SampleStack(state);
  }
  // Always resume, even if reading the context failed.
  ResumeThread(profiled_thread);
}
603 
604 #elif V8_OS_FUCHSIA
605 
// Samples the profiled thread on Fuchsia: suspend it, wait for the
// suspension to take effect, read its general registers, hand pc/sp/fp to
// SampleStack, then release the suspend token (which resumes the thread).
void Sampler::DoSample() {
  zx_handle_t profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == ZX_HANDLE_INVALID) return;

  zx_handle_t suspend_token = ZX_HANDLE_INVALID;
  if (zx_task_suspend_token(profiled_thread, &suspend_token) != ZX_OK) return;

  // Wait for the target thread to become suspended, or to exit.
  // TODO(wez): There is currently no suspension count for threads, so there
  // is a risk that some other caller resumes the thread in-between our suspend
  // and wait calls, causing us to miss the SUSPENDED signal. We apply a 100ms
  // deadline to protect against hanging the sampler thread in this case.
  zx_signals_t signals = 0;
  zx_status_t suspended = zx_object_wait_one(
      profiled_thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
      zx_deadline_after(ZX_MSEC(100)), &signals);
  if (suspended != ZX_OK || (signals & ZX_THREAD_SUSPENDED) == 0) {
    // The thread terminated or the wait timed out; skip this sample.
    zx_handle_close(suspend_token);
    return;
  }

  // Fetch a copy of its "general register" states.
  zx_thread_state_general_regs_t thread_state = {};
  if (zx_thread_read_state(profiled_thread, ZX_THREAD_STATE_GENERAL_REGS,
                           &thread_state, sizeof(thread_state)) == ZX_OK) {
    v8::RegisterState state;
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(thread_state.rip);
    state.sp = reinterpret_cast<void*>(thread_state.rsp);
    state.fp = reinterpret_cast<void*>(thread_state.rbp);
#elif V8_HOST_ARCH_ARM64
    state.pc = reinterpret_cast<void*>(thread_state.pc);
    state.sp = reinterpret_cast<void*>(thread_state.sp);
    state.fp = reinterpret_cast<void*>(thread_state.r[29]);
#endif
    SampleStack(state);
  }

  // Closing the token resumes the suspended thread.
  zx_handle_close(suspend_token);
}
646 
647 // TODO(wez): Remove this once the Fuchsia SDK has rolled.
648 #if defined(ZX_THREAD_STATE_REGSET0)
649 #undef ZX_THREAD_STATE_GENERAL_REGS
650 #endif
651 
652 #endif  // USE_SIGNALS
653 
654 }  // namespace sampler
655 }  // namespace v8
656