• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
// Platform specific code for MacOS goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
30 
31 #include <dlfcn.h>
32 #include <unistd.h>
33 #include <sys/mman.h>
34 #include <mach/mach_init.h>
35 #include <mach-o/dyld.h>
36 #include <mach-o/getsect.h>
37 
38 #include <AvailabilityMacros.h>
39 
40 #include <pthread.h>
41 #include <semaphore.h>
42 #include <signal.h>
43 #include <libkern/OSAtomic.h>
44 #include <mach/mach.h>
45 #include <mach/semaphore.h>
46 #include <mach/task.h>
47 #include <mach/vm_statistics.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #include <sys/types.h>
51 #include <sys/sysctl.h>
52 #include <stdarg.h>
53 #include <stdlib.h>
54 #include <string.h>
55 #include <errno.h>
56 
57 #undef MAP_TYPE
58 
59 #include "v8.h"
60 
61 #include "platform.h"
62 #include "vm-state-inl.h"
63 
64 // Manually define these here as weak imports, rather than including execinfo.h.
65 // This lets us launch on 10.4 which does not have these calls.
66 extern "C" {
67   extern int backtrace(void**, int) __attribute__((weak_import));
68   extern char** backtrace_symbols(void* const*, int)
69       __attribute__((weak_import));
70   extern void backtrace_symbols_fd(void* const*, int, int)
71       __attribute__((weak_import));
72 }
73 
74 
75 namespace v8 {
76 namespace internal {
77 
// 0 is never a valid thread id on MacOSX since a pthread_t is
// a pointer.
80 static const pthread_t kNoThread = (pthread_t) 0;
81 
82 
// Correct Mac OS X Leopard 'ceil' behavior: inputs in the open
// interval (-1, 0) must round up to negative zero rather than
// positive zero.
double ceiling(double x) {
  if (x > -1.0 && x < 0.0) return -0.0;
  return ceil(x);
}
91 
92 
// Guards updates to the allocated-space limits below; created in
// OS::Setup().
static Mutex* limit_mutex = NULL;
94 
95 
Setup()96 void OS::Setup() {
97   // Seed the random number generator.
98   // Convert the current time to a 64-bit integer first, before converting it
99   // to an unsigned. Going directly will cause an overflow and the seed to be
100   // set to all ones. The seed will be identical for different instances that
101   // call this setup code within the same millisecond.
102   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
103   srandom(static_cast<unsigned int>(seed));
104   limit_mutex = CreateMutex();
105 }
106 
107 
108 // We keep the lowest and highest addresses mapped as a quick way of
109 // determining that pointers are outside the heap (used mostly in assertions
110 // and verification).  The estimate is conservative, ie, not all addresses in
111 // 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
113 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
114 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
115 
116 
UpdateAllocatedSpaceLimits(void * address,int size)117 static void UpdateAllocatedSpaceLimits(void* address, int size) {
118   ASSERT(limit_mutex != NULL);
119   ScopedLock lock(limit_mutex);
120 
121   lowest_ever_allocated = Min(lowest_ever_allocated, address);
122   highest_ever_allocated =
123       Max(highest_ever_allocated,
124           reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
125 }
126 
127 
IsOutsideAllocatedSpace(void * address)128 bool OS::IsOutsideAllocatedSpace(void* address) {
129   return address < lowest_ever_allocated || address >= highest_ever_allocated;
130 }
131 
132 
// Allocation granularity: mmap-based allocation is page aligned.
size_t OS::AllocateAlignment() {
  return getpagesize();
}
136 
137 
138 // Constants used for mmap.
139 // kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory analysis
141 // tools like vmmap(1).
142 static const int kMmapFd = VM_MAKE_TAG(255);
143 static const off_t kMmapFdOffset = 0;
144 
145 
// Reserves and commits at least 'requested' bytes, rounded up to whole
// pages. On success stores the actual size in *allocated and returns
// the base address; returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  // kMmapFd carries the VM_MAKE_TAG(255) user tag so tools like
  // vmmap(1) can attribute the region to V8.
  void* mbase = mmap(NULL, msize, prot,
                     MAP_PRIVATE | MAP_ANON,
                     kMmapFd, kMmapFdOffset);
  if (mbase == MAP_FAILED) {
    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
162 
163 
// Unmaps a region previously returned by OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap's return value is only checked via ASSERT,
  // so failures are silently ignored in release builds.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}
170 
171 
172 #ifdef ENABLE_HEAP_PROTECTION
173 
// Heap protection is not implemented on Mac OS X.
void OS::Protect(void* address, size_t size) {
  UNIMPLEMENTED();
}
177 
178 
// Heap protection is not implemented on Mac OS X.
void OS::Unprotect(void* address, size_t size, bool is_executable) {
  UNIMPLEMENTED();
}
182 
183 #endif
184 
185 
Sleep(int milliseconds)186 void OS::Sleep(int milliseconds) {
187   usleep(1000 * milliseconds);
188 }
189 
190 
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination
  abort();
}
195 
196 
void OS::DebugBreak() {
  // Software breakpoint; assumes an ia32/x64 host (this file only
  // supports those architectures, see SampleContext below).
  asm("int $3");
}
200 
201 
// Memory-mapped file backed by mmap() over a stdio FILE. The mapping
// and the FILE are released by the destructor (defined below).
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned; closed in the destructor.
  void* memory_;  // mmap'ed region; munmap'ed in the destructor.
  int size_;      // Size of the mapping in bytes.
};
214 
215 
open(const char * name)216 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
217   FILE* file = fopen(name, "r+");
218   if (file == NULL) return NULL;
219 
220   fseek(file, 0, SEEK_END);
221   int size = ftell(file);
222 
223   void* memory =
224       mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
225   return new PosixMemoryMappedFile(file, memory, size);
226 }
227 
228 
create(const char * name,int size,void * initial)229 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
230     void* initial) {
231   FILE* file = fopen(name, "w+");
232   if (file == NULL) return NULL;
233   int result = fwrite(initial, size, 1, file);
234   if (result < 1) {
235     fclose(file);
236     return NULL;
237   }
238   void* memory =
239       mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
240   return new PosixMemoryMappedFile(file, memory, size);
241 }
242 
243 
~PosixMemoryMappedFile()244 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
245   if (memory_) munmap(memory_, size_);
246   fclose(file_);
247 }
248 
249 
// Logs the address range of the __TEXT,__text section of every image
// loaded into the process, so the profiler can symbolize C++ frames.
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    // The section address from the header is unslid; add the image's
    // ASLR slide to get the actual load address.
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(Isolate::Current(),
        SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}
275 
276 
// No-op on Mac OS X; only needed on platforms where the profiler must
// be told about code-moving garbage collections (e.g. Linux).
void OS::SignalCodeMovingGC() {
}
279 
280 
CpuFeaturesImpliedByPlatform()281 uint64_t OS::CpuFeaturesImpliedByPlatform() {
282   // MacOSX requires all these to install so we can assume they are present.
283   // These constants are defined by the CPUid instructions.
284   const uint64_t one = 1;
285   return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
286 }
287 
288 
int OS::ActivationFrameAlignment() {
  // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
  // Function Call Guide".
  return 16;
}
294 
295 
// Store with release semantics: the barrier ensures all prior writes
// are visible before the store to *ptr.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  OSMemoryBarrier();
  *ptr = value;
}
300 
301 
LocalTimezone(double time)302 const char* OS::LocalTimezone(double time) {
303   if (isnan(time)) return "";
304   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
305   struct tm* t = localtime(&tv);
306   if (NULL == t) return "";
307   return t->tm_zone;
308 }
309 
310 
LocalTimeOffset()311 double OS::LocalTimeOffset() {
312   time_t tv = time(NULL);
313   struct tm* t = localtime(&tv);
314   // tm_gmtoff includes any daylight savings offset, so subtract it.
315   return static_cast<double>(t->tm_gmtoff * msPerSecond -
316                              (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
317 }
318 
319 
// Captures a textual stack trace into 'frames' using the weakly linked
// backtrace() family. Returns the number of frames captured, 0 when
// backtrace() is unavailable, or kStackWalkError on symbolization
// failure.
int OS::StackWalk(Vector<StackFrame> frames) {
  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
  if (backtrace == NULL)
    return 0;

  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text,
                               kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  // backtrace_symbols() allocates a single block holding all strings.
  free(symbols);

  return frames_count;
}
351 
352 
353 
354 
// Reserves (but does not commit) a 'size'-byte region. PROT_NONE plus
// MAP_NORESERVE keeps the range from consuming memory until Commit().
// On failure address_ is MAP_FAILED, which IsReserved() detects.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}
361 
362 
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    // On success mark as unreserved; MAP_FAILED doubles as the
    // "no reservation" sentinel (see IsReserved).
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}
368 
369 
// True iff the constructor's mmap() succeeded and the region has not
// been unmapped.
bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}
373 
374 
// Commits pages inside the reserved region with read/write (and
// optionally execute) permissions. MAP_FIXED replaces the PROT_NONE
// placeholder mapping in place.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}
386 
387 
// Returns committed pages to the reserved-but-uncommitted state by
// remapping them PROT_NONE with MAP_NORESERVE.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
393 
394 
// Per-thread platform data holding the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
};
400 
// Creates a thread with an explicit name and stack size from
// 'options'; the thread does not run until Start() is called.
Thread::Thread(Isolate* isolate, const Options& options)
    : data_(new PlatformData),
      isolate_(isolate),
      stack_size_(options.stack_size) {
  set_name(options.name);
}
407 
408 
// Creates a named thread with the default stack size; the thread does
// not run until Start() is called.
Thread::Thread(Isolate* isolate, const char* name)
    : data_(new PlatformData),
      isolate_(isolate),
      stack_size_(0) {
  set_name(name);
}
415 
416 
Thread::~Thread() {
  delete data_;
}
420 
421 
// Names the *current* thread for debuggers/crash reports, if the OS
// supports it.
static void SetThreadName(const char* name) {
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime via dlsym rather than linking it directly.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
    dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (!dynamic_pthread_setname_np)
    return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  USE(kMaxNameLength);
  ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
}
437 
438 
// pthread entry point: publishes the thread handle, names the thread,
// binds the isolate TLS slot, and runs the Thread's Run() method.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  SetThreadName(thread->name());
  ASSERT(thread->data()->thread_ != kNoThread);
  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
  thread->Run();
  return NULL;
}
451 
452 
// Copies 'name' into the fixed-size name_ buffer, truncating if
// necessary and always NUL-terminating.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
457 
458 
Start()459 void Thread::Start() {
460   pthread_attr_t* attr_ptr = NULL;
461   pthread_attr_t attr;
462   if (stack_size_ > 0) {
463     pthread_attr_init(&attr);
464     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
465     attr_ptr = &attr;
466   }
467   pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
468   ASSERT(data_->thread_ != kNoThread);
469 }
470 
471 
// Blocks until the thread's Run() method has returned.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
475 
476 
477 #ifdef V8_FAST_TLS_SUPPORTED
478 
// Set (with release semantics) once kMacTlsBaseOffset below has been
// computed by InitializeTlsBaseOffset().
static Atomic32 tls_base_offset_initialized = 0;
intptr_t kMacTlsBaseOffset = 0;
481 
// It's safe to do the initialization more than once, but it has to be
// done at least once.
// Computes kMacTlsBaseOffset, the offset of the pthread TLS slot array
// from the TLS base register, which depends on the XNU kernel major
// version (queried via sysctl KERN_OSRELEASE).
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
  }
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component.
  // Make sure the buffer is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  char* period_pos = strchr(buffer, '.');
  // Guard against an unexpected version format: strchr returns NULL
  // when no '.' is present, and dereferencing it would crash. strtol
  // below stops at the first non-digit anyway.
  if (period_pos != NULL) *period_pos = '\0';
  int kernel_version_major =
      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
  // The constants below are taken from pthreads.s from the XNU kernel
  // sources archive at www.opensource.apple.com.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
    // same offsets.
#if defined(V8_HOST_ARCH_IA32)
    kMacTlsBaseOffset = 0x48;
#else
    kMacTlsBaseOffset = 0x60;
#endif
  } else {
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;
  }

  Release_Store(&tls_base_offset_initialized, 1);
}
517 
// Sanity-checks the fast TLS path by writing a sentinel through the
// portable setter and reading it back through the fast getter; aborts
// the process on mismatch.
static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    V8_Fatal(__FILE__, __LINE__,
             "V8 failed to initialize fast TLS on current kernel");
  }
  Thread::SetThreadLocal(key, NULL);
}
528 
529 #endif  // V8_FAST_TLS_SUPPORTED
530 
531 
// Allocates a pthread TLS key, lazily initializing (and verifying)
// the fast TLS base offset on first use.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (tls_base_offset_initialized == 0) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(typed_key);
#endif
  return typed_key;
}
551 
552 
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);  // 'result' is only inspected in debug builds.
  ASSERT(result == 0);
}
559 
560 
void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}
565 
566 
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
571 
572 
// Yields the remainder of the current scheduling quantum.
void Thread::YieldCPU() {
  sched_yield();
}
576 
577 
578 class MacOSMutex : public Mutex {
579  public:
580 
MacOSMutex()581   MacOSMutex() {
582     pthread_mutexattr_t attr;
583     pthread_mutexattr_init(&attr);
584     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
585     pthread_mutex_init(&mutex_, &attr);
586   }
587 
~MacOSMutex()588   virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
589 
Lock()590   virtual int Lock() { return pthread_mutex_lock(&mutex_); }
Unlock()591   virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
592 
TryLock()593   virtual bool TryLock() {
594     int result = pthread_mutex_trylock(&mutex_);
595     // Return false if the lock is busy and locking failed.
596     if (result == EBUSY) {
597       return false;
598     }
599     ASSERT(result == 0);  // Verify no other errors.
600     return true;
601   }
602 
603  private:
604   pthread_mutex_t mutex_;
605 };
606 
607 
// Factory for the platform mutex; caller owns the returned object.
Mutex* OS::CreateMutex() {
  return new MacOSMutex();
}
611 
612 
// Counting semaphore built on Mach semaphores.
class MacOSSemaphore : public Semaphore {
 public:
  explicit MacOSSemaphore(int count) {
    semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
  }

  ~MacOSSemaphore() {
    semaphore_destroy(mach_task_self(), semaphore_);
  }

  // The MacOS mach semaphore documentation claims it does not have spurious
  // wakeups, the way pthreads semaphores do.  So the code from the linux
  // platform is not needed here.
  void Wait() { semaphore_wait(semaphore_); }

  // Timed wait; 'timeout' is in microseconds (see definition below).
  bool Wait(int timeout);

  void Signal() { semaphore_signal(semaphore_); }

 private:
  semaphore_t semaphore_;
};
635 
636 
// Waits up to 'timeout' microseconds. Returns false only when the
// wait timed out; any other result (success or another error code)
// returns true.
bool MacOSSemaphore::Wait(int timeout) {
  mach_timespec_t ts;
  ts.tv_sec = timeout / 1000000;
  ts.tv_nsec = (timeout % 1000000) * 1000;
  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
}
643 
644 
// Factory for the platform semaphore; caller owns the returned object.
Semaphore* OS::CreateSemaphore(int count) {
  return new MacOSSemaphore(count);
}
648 
649 
650 #ifdef ENABLE_LOGGING_AND_PROFILING
651 
// Holds the Mach port of the thread being profiled, captured at
// Sampler construction time (i.e. on the profiled thread itself).
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : profiled_thread_(mach_thread_self()) {}

  ~PlatformData() {
    // Deallocate Mach port for thread.
    mach_port_deallocate(mach_task_self(), profiled_thread_);
  }

  thread_act_t profiled_thread() { return profiled_thread_; }

 private:
  // Note: for profiled_thread_ Mach primitives are used instead of PThread's
  // because the latter doesn't provide thread manipulation primitives required.
  // For details, consult "Mac OS X Internals" book, Section 7.3.
  thread_act_t profiled_thread_;
};
669 
// Dedicated thread that periodically samples all registered samplers.
// A single process-wide instance is lazily created by
// AddActiveSampler() and torn down when the last sampler is removed.
class SamplerThread : public Thread {
 public:
  explicit SamplerThread(int interval)
      : Thread(NULL, "SamplerThread"),
        interval_(interval) {}

  // Registers 'sampler' and lazily starts the singleton thread. All
  // active samplers are expected to share the same interval.
  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      instance_ = new SamplerThread(sampler->interval());
      instance_->Start();
    } else {
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  // Unregisters 'sampler'; joins and destroys the singleton thread
  // when no samplers remain.
  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
      instance_->Join();
      delete instance_;
      instance_ = NULL;
    }
  }

  // Implement Thread::Run(). Loops until the registry is empty,
  // sampling and/or ticking the runtime profiler each period.
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
      }
      if (runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
      }
      OS::Sleep(interval_);
    }
  }

  // IterateActiveSamplers callback: takes one CPU-profile sample.
  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
    if (!sampler->isolate()->IsInitialized()) return;
    if (!sampler->IsProfiling()) return;
    SamplerThread* sampler_thread =
        reinterpret_cast<SamplerThread*>(raw_sampler_thread);
    sampler_thread->SampleContext(sampler);
  }

  // IterateActiveSamplers callback: ticks the runtime profiler.
  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  // Suspends the profiled thread, reads its pc/sp/fp registers via
  // thread_get_state, records a tick sample, and resumes the thread.
  void SampleContext(Sampler* sampler) {
    thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
    TickSample sample_obj;
    TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
    if (sample == NULL) sample = &sample_obj;

    // If the thread cannot be suspended, skip this sample.
    if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;

#if V8_HOST_ARCH_X64
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    x86_thread_state64_t state;
    mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
#else
#define REGISTER_FIELD(name) r ## name
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
    thread_state_flavor_t flavor = i386_THREAD_STATE;
    i386_thread_state_t state;
    mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
#else
#define REGISTER_FIELD(name) e ## name
#endif  // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
#endif  // V8_HOST_ARCH

    if (thread_get_state(profiled_thread,
                         flavor,
                         reinterpret_cast<natural_t*>(&state),
                         &count) == KERN_SUCCESS) {
      sample->state = sampler->isolate()->current_vm_state();
      sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
      sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
      sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
      sampler->SampleStack(sample);
      sampler->Tick(sample);
    }
    // Always resume, even if reading the register state failed.
    thread_resume(profiled_thread);
  }

  const int interval_;  // Sampling period in milliseconds.
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static Mutex* mutex_;
  static SamplerThread* instance_;

  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
791 
792 #undef REGISTER_FIELD
793 
794 
// Process-wide sampler state; mutex_ guards instance_ and the
// registry manipulation in Add/RemoveActiveSampler.
Mutex* SamplerThread::mutex_ = OS::CreateMutex();
SamplerThread* SamplerThread::instance_ = NULL;
797 
798 
// Creates an inactive sampler for 'isolate'; PlatformData captures the
// constructing thread's Mach port as the thread to be profiled.
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}
807 
808 
Sampler::~Sampler() {
  ASSERT(!IsActive());  // Stop() must be called before destruction.
  delete data_;
}
813 
814 
// Marks the sampler active and registers it with the SamplerThread.
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SamplerThread::AddActiveSampler(this);
}
820 
821 
// Unregisters the sampler and marks it inactive.
void Sampler::Stop() {
  ASSERT(IsActive());
  SamplerThread::RemoveActiveSampler(this);
  SetActive(false);
}
827 
828 #endif  // ENABLE_LOGGING_AND_PROFILING
829 
830 } }  // namespace v8::internal
831