// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/libsampler/sampler.h"

#if V8_OS_POSIX && !V8_OS_CYGWIN && !V8_OS_FUCHSIA

#define USE_SIGNALS

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>

#if !V8_OS_QNX && !V8_OS_AIX
#include <sys/syscall.h>  // NOLINT
#endif

#if V8_OS_MACOSX
#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
#elif (!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && !V8_OS_OPENBSD
#include <ucontext.h>
#endif

#include <unistd.h>

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
    (defined(__arm__) || defined(__aarch64__)) && \
    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>  // NOLINT
#endif

#elif V8_OS_WIN || V8_OS_CYGWIN

#include "src/base/win32-headers.h"

#elif V8_OS_FUCHSIA

#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/debug.h>
#include <zircon/types.h>

// TODO(wez): Remove this once the Fuchsia SDK has rolled.
#if defined(ZX_THREAD_STATE_REGSET0)
#define ZX_THREAD_STATE_GENERAL_REGS ZX_THREAD_STATE_REGSET0
zx_status_t zx_thread_read_state(zx_handle_t h, uint32_t k, void* b, size_t l) {
  uint32_t dummy_out_len = 0;
  return zx_thread_read_state(h, k, b, static_cast<uint32_t>(l),
                              &dummy_out_len);
}
#if defined(__x86_64__)
typedef zx_x86_64_general_regs_t zx_thread_state_general_regs_t;
#else
typedef zx_arm64_general_regs_t zx_thread_state_general_regs_t;
#endif
#endif  // defined(ZX_THREAD_STATE_REGSET0)

#endif

#include <algorithm>
#include <vector>
#include <map>

#include "src/base/atomic-utils.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"

#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)

// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See http://code.google.com/p/android/issues/detail?id=34784

#if defined(__arm__)

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__aarch64__)

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
  uint64_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
  uint32_t regmask;
  uint32_t status;
  uint64_t pc;
  uint64_t gregs[32];
  uint64_t fpregs[32];
  uint32_t acx;
  uint32_t fpc_csr;
  uint32_t fpc_eir;
  uint32_t used_math;
  uint32_t dsp;
  uint64_t mdhi;
  uint64_t mdlo;
  uint32_t hi1;
  uint32_t lo1;
  uint32_t hi2;
  uint32_t lo2;
  uint32_t hi3;
  uint32_t lo3;
} mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__i386__)
// x86 version for Android.
typedef struct {
  uint32_t gregs[19];
  void* fpregs;
  uint32_t oldmask;
  uint32_t cr2;
} mcontext_t;

typedef uint32_t kernel_sigset_t[2];  // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };

#elif defined(__x86_64__)
// x64 version for Android.
typedef struct {
  uint64_t gregs[23];
  void* fpregs;
  uint64_t __reserved1[8];
} mcontext_t;

typedef struct ucontext {
  uint64_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif

#endif  // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)


namespace v8 {
namespace sampler {

namespace {

#if defined(USE_SIGNALS)
typedef std::vector<Sampler*> SamplerList;
typedef SamplerList::iterator SamplerListIterator;
typedef base::AtomicValue<bool> AtomicMutex;

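// RAII guard around an AtomicMutex. Acquisition uses a compare-and-swap loop
// (or a single attempt in non-blocking mode) instead of a real lock, so the
// guard can also be taken from the SIGPROF handler, where blocking on a mutex
// held by the interrupted thread would never make progress.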
class AtomicGuard {
 public:
  explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true)
      : atomic_(atomic), is_success_(false) {
    do {
      // Use Acquire_Load to gain mutual exclusion.
      USE(atomic_->Value());
      is_success_ = atomic_->TrySetValue(false, true);
    } while (is_blocking && !is_success_);
  }

  bool is_success() const { return is_success_; }

  ~AtomicGuard() {
    if (!is_success_) return;
    atomic_->SetValue(false);
  }

 private:
  AtomicMutex* const atomic_;
  bool is_success_;
};

// Returns key for hash map.
void* ThreadKey(pthread_t thread_id) {
  return reinterpret_cast<void*>(thread_id);
}

// Returns hash value for hash map.
uint32_t ThreadHash(pthread_t thread_id) {
#if V8_OS_BSD
  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
#else
  return static_cast<uint32_t>(thread_id);
#endif
}

#endif  // USE_SIGNALS

}  // namespace

#if defined(USE_SIGNALS)

class Sampler::PlatformData {
 public:
  PlatformData() : vm_tid_(pthread_self()) {}
  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};

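// Keeps track of the registered Samplers for each sampled thread. The SIGPROF
// handler uses it to find the samplers for the interrupted thread and hands
// each of them the captured register state.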
class SamplerManager {
 public:
  SamplerManager() : sampler_map_() {}

  void AddSampler(Sampler* sampler) {
    AtomicGuard atomic_guard(&samplers_access_counter_);
    DCHECK(sampler->IsActive() || !sampler->IsRegistered());
    // Add sampler into map if needed.
    pthread_t thread_id = sampler->platform_data()->vm_tid();
    base::HashMap::Entry* entry =
            sampler_map_.LookupOrInsert(ThreadKey(thread_id),
                                        ThreadHash(thread_id));
    DCHECK_NOT_NULL(entry);
    if (entry->value == nullptr) {
      SamplerList* samplers = new SamplerList();
      samplers->push_back(sampler);
      entry->value = samplers;
    } else {
      SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
      bool exists = false;
      for (SamplerListIterator iter = samplers->begin();
           iter != samplers->end(); ++iter) {
        if (*iter == sampler) {
          exists = true;
          break;
        }
      }
      if (!exists) {
        samplers->push_back(sampler);
      }
    }
  }

  void RemoveSampler(Sampler* sampler) {
    AtomicGuard atomic_guard(&samplers_access_counter_);
    DCHECK(sampler->IsActive() || sampler->IsRegistered());
    // Remove sampler from map.
    pthread_t thread_id = sampler->platform_data()->vm_tid();
    void* thread_key = ThreadKey(thread_id);
    uint32_t thread_hash = ThreadHash(thread_id);
    base::HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash);
    DCHECK_NOT_NULL(entry);
    SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
    for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
         ++iter) {
      if (*iter == sampler) {
        samplers->erase(iter);
        break;
      }
    }
    if (samplers->empty()) {
      sampler_map_.Remove(thread_key, thread_hash);
      delete samplers;
    }
  }

#if defined(USE_SIGNALS)
  void DoSample(const v8::RegisterState& state) {
    AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
    if (!atomic_guard.is_success()) return;
    pthread_t thread_id = pthread_self();
    base::HashMap::Entry* entry =
        sampler_map_.Lookup(ThreadKey(thread_id), ThreadHash(thread_id));
    if (!entry) return;
    SamplerList& samplers = *static_cast<SamplerList*>(entry->value);

    for (size_t i = 0; i < samplers.size(); ++i) {
      Sampler* sampler = samplers[i];
      Isolate* isolate = sampler->isolate();
      // We require a fully initialized and entered isolate.
      if (isolate == nullptr || !isolate->IsInUse()) continue;
      if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue;
      sampler->SampleStack(state);
    }
  }
#endif

  static SamplerManager* instance() { return instance_.Pointer(); }

 private:
  base::HashMap sampler_map_;
  static AtomicMutex samplers_access_counter_;
  static base::LazyInstance<SamplerManager>::type instance_;
};

AtomicMutex SamplerManager::samplers_access_counter_;
base::LazyInstance<SamplerManager>::type SamplerManager::instance_ =
    LAZY_INSTANCE_INITIALIZER;

#elif V8_OS_WIN || V8_OS_CYGWIN

// ----------------------------------------------------------------------------
// Win32 profiler support. On Cygwin we use the same sampler implementation as
// on Win32.

class Sampler::PlatformData {
 public:
  // Get a handle to the calling thread. This is the thread that we are
  // going to profile. We need to make a copy of the handle because we are
  // going to use it in the sampler thread. Using GetThreadHandle() will
  // not work in this case. We're using OpenThread because DuplicateHandle
  // for some reason doesn't work in Chrome's sandbox.
  PlatformData()
      : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
                                    THREAD_SUSPEND_RESUME |
                                    THREAD_QUERY_INFORMATION,
                                    false,
                                    GetCurrentThreadId())) {}

  ~PlatformData() {
    if (profiled_thread_ != nullptr) {
      CloseHandle(profiled_thread_);
      profiled_thread_ = nullptr;
    }
  }

  HANDLE profiled_thread() { return profiled_thread_; }

 private:
  HANDLE profiled_thread_;
};

#elif V8_OS_FUCHSIA

class Sampler::PlatformData {
 public:
  PlatformData() {
    zx_handle_duplicate(zx_thread_self(), ZX_RIGHT_SAME_RIGHTS,
                        &profiled_thread_);
  }
  ~PlatformData() {
    if (profiled_thread_ != ZX_HANDLE_INVALID) {
      zx_handle_close(profiled_thread_);
      profiled_thread_ = ZX_HANDLE_INVALID;
    }
  }

  zx_handle_t profiled_thread() { return profiled_thread_; }

 private:
  zx_handle_t profiled_thread_ = ZX_HANDLE_INVALID;
};

#endif  // USE_SIGNALS


#if defined(USE_SIGNALS)
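// Manages the process-wide SIGPROF handler: the handler is installed when the
// first client calls IncreaseSamplerCount() and the previous disposition is
// restored when the last client calls DecreaseSamplerCount().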
class SignalHandler {
 public:
  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
  static void TearDown() {
    delete mutex_;
    mutex_ = nullptr;
  }

  static void IncreaseSamplerCount() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    if (++client_count_ == 1) Install();
  }

  static void DecreaseSamplerCount() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    if (--client_count_ == 0) Restore();
  }

  static bool Installed() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    return signal_handler_installed_;
  }

 private:
  static void Install() {
    struct sigaction sa;
    sa.sa_sigaction = &HandleProfilerSignal;
    sigemptyset(&sa.sa_mask);
#if V8_OS_QNX
    sa.sa_flags = SA_SIGINFO;
#else
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
#endif
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void Restore() {
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
  }

  static void FillRegisterState(void* context, RegisterState* regs);
  static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);

  // Protects the process-wide state below.
  static base::Mutex* mutex_;
  static int client_count_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;
};

base::Mutex* SignalHandler::mutex_ = nullptr;
int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;


void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
                                         void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  v8::RegisterState state;
  FillRegisterState(context, &state);
  SamplerManager::instance()->DoSample(state);
}

void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
  mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
  // Old GLibc ARM versions used a gregs[] array to access the register
  // values from mcontext_t.
  state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
#else
  state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
  state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
  state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
#endif  // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
#elif V8_HOST_ARCH_ARM64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.sp);
  // FP is an alias for x29.
  state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_MIPS64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_PPC
#if V8_LIBC_GLIBC
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
  state->sp =
      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
  state->fp =
      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
#else
  // Some C libraries, notably Musl, define the regs member as a void pointer,
  // so index the gp_regs array directly instead.
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[32]);
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[1]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[31]);
#endif
#elif V8_HOST_ARCH_S390
#if V8_TARGET_ARCH_32_BIT
  // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
  // mode.  This bit needs to be masked out to resolve actual address.
  state->pc =
      reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
#else
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
#endif  // V8_TARGET_ARCH_32_BIT
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
#if __DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#else  // !__DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->ss.rip);
  state->sp = reinterpret_cast<void*>(mcontext->ss.rsp);
  state->fp = reinterpret_cast<void*>(mcontext->ss.rbp);
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
#if __DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
#else  // !__DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->ss.eip);
  state->sp = reinterpret_cast<void*>(mcontext->ss.esp);
  state->fp = reinterpret_cast<void*>(mcontext->ss.ebp);
#endif  // __DARWIN_UNIX03
#endif  // V8_HOST_ARCH_IA32
#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.mc_r15);
  state->sp = reinterpret_cast<void*>(mcontext.mc_r13);
  state->fp = reinterpret_cast<void*>(mcontext.mc_r11);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_SOLARIS
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
#elif V8_OS_QNX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_AIX
  state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
  state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
  state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
#endif  // V8_OS_AIX
}

#endif  // USE_SIGNALS

void Sampler::SetUp() {
#if defined(USE_SIGNALS)
  SignalHandler::SetUp();
#endif
}


void Sampler::TearDown() {
#if defined(USE_SIGNALS)
  SignalHandler::TearDown();
#endif
}

Sampler::Sampler(Isolate* isolate)
    : is_counting_samples_(false),
      js_sample_count_(0),
      external_sample_count_(0),
      isolate_(isolate),
      profiling_(false),
      has_processing_thread_(false),
      active_(false),
      registered_(false) {
  data_ = new PlatformData;
}

Sampler::~Sampler() {
  DCHECK(!IsActive());
#if defined(USE_SIGNALS)
  if (IsRegistered()) {
    SamplerManager::instance()->RemoveSampler(this);
  }
#endif
  delete data_;
}

void Sampler::Start() {
  DCHECK(!IsActive());
  SetActive(true);
#if defined(USE_SIGNALS)
  SamplerManager::instance()->AddSampler(this);
#endif
}


void Sampler::Stop() {
#if defined(USE_SIGNALS)
  SamplerManager::instance()->RemoveSampler(this);
#endif
  DCHECK(IsActive());
  SetActive(false);
  SetRegistered(false);
}


void Sampler::IncreaseProfilingDepth() {
  base::Relaxed_AtomicIncrement(&profiling_, 1);
#if defined(USE_SIGNALS)
  SignalHandler::IncreaseSamplerCount();
#endif
}


void Sampler::DecreaseProfilingDepth() {
#if defined(USE_SIGNALS)
  SignalHandler::DecreaseSamplerCount();
#endif
  base::Relaxed_AtomicIncrement(&profiling_, -1);
}


#if defined(USE_SIGNALS)

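// Signal-based sampling: send SIGPROF to the sampled thread; the installed
// handler captures its register state via FillRegisterState() and dispatches
// it through SamplerManager::DoSample().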
void Sampler::DoSample() {
  if (!SignalHandler::Installed()) return;
  if (!IsActive() && !IsRegistered()) {
    SamplerManager::instance()->AddSampler(this);
    SetRegistered(true);
  }
  pthread_kill(platform_data()->vm_tid(), SIGPROF);
}

#elif V8_OS_WIN || V8_OS_CYGWIN

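// On Win32/Cygwin the profiled thread is suspended, its register state is read
// with GetThreadContext(), and the thread is resumed after the sample is
// taken.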
void Sampler::DoSample() {
  HANDLE profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == nullptr) return;

  const DWORD kSuspendFailed = static_cast<DWORD>(-1);
  if (SuspendThread(profiled_thread) == kSuspendFailed) return;

  // Context used for sampling the register state of the profiled thread.
  CONTEXT context;
  memset(&context, 0, sizeof(context));
  context.ContextFlags = CONTEXT_FULL;
  if (GetThreadContext(profiled_thread, &context) != 0) {
    v8::RegisterState state;
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(context.Rip);
    state.sp = reinterpret_cast<void*>(context.Rsp);
    state.fp = reinterpret_cast<void*>(context.Rbp);
#else
    state.pc = reinterpret_cast<void*>(context.Eip);
    state.sp = reinterpret_cast<void*>(context.Esp);
    state.fp = reinterpret_cast<void*>(context.Ebp);
#endif
    SampleStack(state);
  }
  ResumeThread(profiled_thread);
}

#elif V8_OS_FUCHSIA

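// On Fuchsia the profiled thread is suspended via a suspend token, its general
// registers are read with zx_thread_read_state(), and closing the token
// resumes the thread.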
void Sampler::DoSample() {
  zx_handle_t profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == ZX_HANDLE_INVALID) return;

  zx_handle_t suspend_token = ZX_HANDLE_INVALID;
  if (zx_task_suspend_token(profiled_thread, &suspend_token) != ZX_OK) return;

  // Wait for the target thread to become suspended, or to exit.
  // TODO(wez): There is currently no suspension count for threads, so there
  // is a risk that some other caller resumes the thread in-between our suspend
  // and wait calls, causing us to miss the SUSPENDED signal. We apply a 100ms
  // deadline to protect against hanging the sampler thread in this case.
  zx_signals_t signals = 0;
  zx_status_t suspended = zx_object_wait_one(
      profiled_thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
      zx_deadline_after(ZX_MSEC(100)), &signals);
  if (suspended != ZX_OK || (signals & ZX_THREAD_SUSPENDED) == 0) {
    zx_handle_close(suspend_token);
    return;
  }

  // Fetch a copy of its "general register" states.
  zx_thread_state_general_regs_t thread_state = {};
  if (zx_thread_read_state(profiled_thread, ZX_THREAD_STATE_GENERAL_REGS,
                           &thread_state, sizeof(thread_state)) == ZX_OK) {
    v8::RegisterState state;
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(thread_state.rip);
    state.sp = reinterpret_cast<void*>(thread_state.rsp);
    state.fp = reinterpret_cast<void*>(thread_state.rbp);
#elif V8_HOST_ARCH_ARM64
    state.pc = reinterpret_cast<void*>(thread_state.pc);
    state.sp = reinterpret_cast<void*>(thread_state.sp);
    state.fp = reinterpret_cast<void*>(thread_state.r[29]);
#endif
    SampleStack(state);
  }

  zx_handle_close(suspend_token);
}

// TODO(wez): Remove this once the Fuchsia SDK has rolled.
#if defined(ZX_THREAD_STATE_REGSET0)
#undef ZX_THREAD_STATE_GENERAL_REGS
#endif

#endif  // USE_SIGNALS

}  // namespace sampler
}  // namespace v8