// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for Solaris 10 goes here. For the POSIX compatible
// parts the implementation is in platform-posix.cc.

#ifdef __sparc
# error "V8 does not support the SPARC CPU architecture."
#endif

#include <sys/stack.h>  // for stack alignment
#include <unistd.h>  // getpagesize(), usleep()
#include <sys/mman.h>  // mmap()
#include <ucontext.h>  // walkcontext(), getcontext()
#include <dlfcn.h>     // dladdr
#include <pthread.h>
#include <sched.h>  // for sched_yield
#include <semaphore.h>
#include <time.h>
#include <sys/time.h>  // gettimeofday(), timeradd()
#include <errno.h>
#include <ieeefp.h>  // finite()
#include <signal.h>  // sigemptyset(), etc
#include <sys/regset.h>


#undef MAP_TYPE

#include "v8.h"

#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"


// It seems there is a bug in some Solaris distributions (experienced in
// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
// access signbit() despite the availability of other C99 math functions.
#ifndef signbit
// Test sign - usually defined in math.h
int signbit(double x) {
  // We need to take care of the special case of both positive and negative
  // versions of zero.
  if (x == 0) {
    return fpclass(x) & FP_NZERO;
  } else {
    // This won't detect negative NaN but that should be okay since we don't
    // assume that behavior.
    return x < 0;
  }
}
#endif  // signbit

namespace v8 {
namespace internal {


// 0 is never a valid thread id on Solaris since the main thread is 1 and
// subsequent ones have their ids incremented from there.
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  return ceil(x);
}


static Mutex* limit_mutex = NULL;
void OS::SetUp() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly will cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}


void OS::PostSetUp() {
  // Math functions depend on CPU features, therefore they are initialized
  // after the CPU has been detected.
  MathSetup();
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;  // Solaris runs on a lot of things.
}


int OS::ActivationFrameAlignment() {
  // GCC generates code that requires 16 byte alignment such as movdqa.
  return Max(STACK_ALIGN, 16);
}

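// ReleaseStore below only needs a compiler barrier: the empty asm statement
// keeps the compiler from reordering the store, and the x86 targets this
// port supports (SPARC is rejected above) do not reorder stores with older
// memory operations.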

void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  __asm__ __volatile__("" : : : "memory");
  *ptr = value;
}


const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return tzname[0];  // The location of the timezone string on Solaris.
}


double OS::LocalTimeOffset() {
  // On Solaris, struct tm does not contain a tm_gmtoff field.
  time_t utc = time(NULL);
  ASSERT(utc != -1);
  struct tm* loc = localtime(&utc);
  ASSERT(loc != NULL);
  return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses
// in 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return static_cast<size_t>(getpagesize());
}


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

  if (mbase == MAP_FAILED) {
    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}


void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


void OS::Sleep(int milliseconds) {
  useconds_t ms = static_cast<useconds_t>(milliseconds);
  usleep(1000 * ms);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


void OS::DebugBreak() {
  asm("int $3");
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


void OS::LogSharedLibraryAddresses() {
}


void OS::SignalCodeMovingGC() {
}


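// Stack walking is delegated to the Solaris walkcontext() API, which invokes
// a callback for every stack frame found in a ucontext. StackWalker carries
// the output vector and the index of the next frame to fill.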
struct StackWalker {
  Vector<OS::StackFrame>& frames;
  int index;
};


static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
  struct StackWalker* walker = static_cast<struct StackWalker*>(data);
  Dl_info info;

  int i = walker->index;

  walker->frames[i].address = reinterpret_cast<void*>(pc);

  // Make sure line termination is in place.
  walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';

  Vector<char> text = MutableCStrVector(walker->frames[i].text,
                                        OS::kStackWalkMaxTextLen);

  if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
    OS::SNPrintF(text, "[0x%p]", pc);
  } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
    // We have symbol info.
    OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
  } else {
    // No local symbol info.
    OS::SNPrintF(text,
                 "%s'0x%p [0x%p]",
                 info.dli_fname,
                 pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
                 pc);
  }
  walker->index++;
  return 0;
}


int OS::StackWalk(Vector<OS::StackFrame> frames) {
  ucontext_t ctx;
  struct StackWalker walker = { frames, 0 };

  if (getcontext(&ctx) < 0) return kStackWalkError;

  // walkcontext() returns 0 on success and -1 on failure.
  if (walkcontext(&ctx, StackWalkCallback, &walker) < 0) {
    return kStackWalkError;
  }

  return walker.index;
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}


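// To obtain an aligned reservation, over-reserve size + alignment bytes with
// PROT_NONE, round the base address up to the requested alignment, and then
// unmap the unused prefix and suffix so only the aligned block stays
// reserved.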
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}


bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}


class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {  }

  pthread_t thread_;  // Thread handle for pthread.
};


Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}


void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  pthread_attr_t attr;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    attr_ptr = &attr;
  }
  // Pass the attributes so a requested stack size actually takes effect.
  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  ASSERT(data_->thread_ != kNoThread);
}


void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}


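// Mutex implementation based on a recursive pthread mutex, so the owning
// thread may lock it again without deadlocking.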
class SolarisMutex : public Mutex {
 public:
  SolarisMutex() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);
  }

  ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }

  int Lock() { return pthread_mutex_lock(&mutex_); }

  int Unlock() { return pthread_mutex_unlock(&mutex_); }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;
};


Mutex* OS::CreateMutex() {
  return new SolarisMutex();
}


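// Semaphore implementation on top of a POSIX sem_t. Wait() retries when
// interrupted by a signal (EINTR); the timed variant below converts the
// timeout into an absolute timespec for sem_timedwait().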
class SolarisSemaphore : public Semaphore {
 public:
  explicit SolarisSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~SolarisSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


void SolarisSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


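// TIMEVAL_TO_TIMESPEC and timeradd are not guaranteed to be provided by the
// Solaris headers, so fallback definitions are supplied for the timed wait
// below.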
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif


#ifndef timeradd
#define timeradd(a, b, result) \
  do { \
    (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
    (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
    if ((result)->tv_usec >= 1000000) { \
      ++(result)->tv_sec; \
      (result)->tv_usec -= 1000000; \
    } \
  } while (0)
#endif


bool SolarisSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and microsecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait until the semaphore is signalled or the timeout expires.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


Semaphore* OS::CreateSemaphore(int count) {
  return new SolarisSemaphore(count);
}


static pthread_t GetThreadID() {
  return pthread_self();
}

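// The SIGPROF handler runs on the sampled thread: it bails out unless a
// fully initialized isolate with an active sampler is in use, then reads
// pc, sp and fp from the signal's ucontext and records a tick for the CPU
// profiler.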
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();

  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);

  sampler->SampleStack(sample);
  sampler->Tick(sample);
}

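// Per-sampler platform data: remembers the pthread id of the thread being
// profiled so the SignalSender thread knows where to deliver SIGPROF.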
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};


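// SignalSender is a dedicated thread that periodically sends SIGPROF to the
// profiled VM thread while CPU profiling is enabled and drives the runtime
// profiler ticks; the actual sampling happens in ProfilerSignalHandler above.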
class SignalSender : public Thread {
 public:
  enum SleepInterval {
    HALF_INTERVAL,
    FULL_INTERVAL
  };

  static const int kSignalSenderStackSize = 64 * KB;

  explicit SignalSender(int interval)
      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
        interval_(interval) {}

  static void InstallSignalHandler() {
    struct sigaction sa;
    sa.sa_sigaction = ProfilerSignalHandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void RestoreSignalHandler() {
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
  }

  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      // Start a thread that will send SIGPROF signals to VM threads
      // when CPU profiling is enabled.
      instance_ = new SignalSender(sampler->interval());
      instance_->Start();
    } else {
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;
      RestoreSignalHandler();
    }
  }

  // Implement Thread::Run().
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      if (cpu_profiling_enabled && !signal_handler_installed_) {
        InstallSignalHandler();
      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
        RestoreSignalHandler();
      }

      // When CPU profiling is enabled, both JavaScript and C++ code are
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled && runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
        Sleep(HALF_INTERVAL);
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
        Sleep(HALF_INTERVAL);
      } else {
        if (cpu_profiling_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
                                                      this)) {
            return;
          }
        }
        if (runtime_profiler_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
                                                      NULL)) {
            return;
          }
        }
        Sleep(FULL_INTERVAL);
      }
    }
  }

  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
    if (!sampler->IsProfiling()) return;
    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
  }

  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  void SendProfilingSignal(pthread_t tid) {
    if (!signal_handler_installed_) return;
    pthread_kill(tid, SIGPROF);
  }

  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate for delays
    // occurring during signal delivery.
    useconds_t interval = interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  const int interval_;
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static LazyMutex mutex_;
  static SignalSender* instance_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalSender);
};

LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;


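// A Sampler only records its parameters and platform data here; the periodic
// signalling is performed by the shared SignalSender thread, which Start()
// and Stop() register with below.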
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}


void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}


void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}

} }  // namespace v8::internal