1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
// Platform specific code for Linux goes here. For the POSIX compatible parts
29 // the implementation is in platform-posix.cc.
30
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <sys/resource.h>
36 #include <sys/types.h>
37 #include <stdlib.h>
38
39 // Ubuntu Dapper requires memory pages to be marked as
40 // executable. Otherwise, OS raises an exception when executing code
41 // in that page.
42 #include <sys/types.h> // mmap & munmap
43 #include <sys/mman.h> // mmap & munmap
44 #include <sys/stat.h> // open
45 #include <fcntl.h> // open
46 #include <unistd.h> // sysconf
47 #ifdef __GLIBC__
48 #include <execinfo.h> // backtrace, backtrace_symbols
49 #endif // def __GLIBC__
50 #include <strings.h> // index
51 #include <errno.h>
52 #include <stdarg.h>
53
54 #undef MAP_TYPE
55
56 #include "v8.h"
57
58 #include "platform.h"
59 #include "top.h"
60 #include "v8threads.h"
61
62
63 namespace v8 {
64 namespace internal {
65
// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
// Used as the sentinel for an uninitialized/invalid ThreadHandle.
static const pthread_t kNoThread = (pthread_t) 0;
69
70
// Rounds |x| up to the nearest integral value; thin wrapper over the
// C library's ceil().
double ceiling(double x) {
  const double rounded_up = ceil(x);
  return rounded_up;
}
74
75
Setup()76 void OS::Setup() {
77 // Seed the random number generator.
78 // Convert the current time to a 64-bit integer first, before converting it
79 // to an unsigned. Going directly can cause an overflow and the seed to be
80 // set to all ones. The seed will be identical for different instances that
81 // call this setup code within the same millisecond.
82 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
83 srandom(static_cast<unsigned int>(seed));
84 }
85
86
// Returns the C library's quiet NaN constant.
double OS::nan_value() {
  return NAN;
}
90
91
// Alignment (in bytes) required for the stack pointer in activation
// frames created when generated code calls into the runtime.
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#else
  // With gcc 4.4 the tree vectorization optimiser can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
#endif
}
103
104
105 // We keep the lowest and highest addresses mapped as a quick way of
106 // determining that pointers are outside the heap (used mostly in assertions
107 // and verification). The estimate is conservative, ie, not all addresses in
108 // 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Widens the [lowest, highest) envelope to cover a newly mapped region.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
120
121
IsOutsideAllocatedSpace(void * address)122 bool OS::IsOutsideAllocatedSpace(void* address) {
123 return address < lowest_ever_allocated || address >= highest_ever_allocated;
124 }
125
126
// Granularity at which OS::Allocate rounds and aligns: the VM page size.
size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}
130
131
// Allocates |requested| bytes (rounded up to whole pages) of anonymous,
// private, read/write (optionally executable) memory. On success the
// actual reserved size is stored in |allocated| and the base address is
// returned; on failure NULL is returned.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  // Track the envelope of mapped memory for IsOutsideAllocatedSpace.
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
146
147
// Releases memory previously obtained from OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  munmap(address, size);
}
152
153
#ifdef ENABLE_HEAP_PROTECTION

// Makes the pages covering [address, address + size) read-only.
void OS::Protect(void* address, size_t size) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  mprotect(address, size, PROT_READ);
}


// Restores read/write (and optionally execute) access to the pages.
void OS::Unprotect(void* address, size_t size, bool is_executable) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  mprotect(address, size, prot);
}

#endif
169
170
Sleep(int milliseconds)171 void OS::Sleep(int milliseconds) {
172 unsigned int ms = static_cast<unsigned int>(milliseconds);
173 usleep(1000 * ms);
174 }
175
176
// Terminates the process abnormally (raises SIGABRT).
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}
181
182
// Triggers a debugger breakpoint with an architecture specific
// instruction (selected on the HOST architecture, not the target).
void OS::DebugBreak() {
  // TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
  // which is the architecture of generated code).
#if defined(__arm__) || defined(__thumb__)
  asm("bkpt 0");
#else
  asm("int $3");
#endif
}
192
193
// Memory mapped file backed by mmap. Owns both the FILE* and the
// mapping; both are released by the destructor.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;    // Underlying stdio stream, kept open while mapped.
  void* memory_;  // Base address of the mapping.
  int size_;      // Size of the mapping in bytes.
};
205
206
create(const char * name,int size,void * initial)207 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
208 void* initial) {
209 FILE* file = fopen(name, "w+");
210 if (file == NULL) return NULL;
211 int result = fwrite(initial, size, 1, file);
212 if (result < 1) {
213 fclose(file);
214 return NULL;
215 }
216 void* memory =
217 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
218 return new PosixMemoryMappedFile(file, memory, size);
219 }
220
221
// Unmaps the memory (if the mapping succeeded) and closes the file.
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}
226
227
LogSharedLibraryAddresses()228 void OS::LogSharedLibraryAddresses() {
229 #ifdef ENABLE_LOGGING_AND_PROFILING
230 // This function assumes that the layout of the file is as follows:
231 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
232 // If we encounter an unexpected situation we abort scanning further entries.
233 FILE *fp = fopen("/proc/self/maps", "r");
234 if (fp == NULL) return;
235
236 // Allocate enough room to be able to store a full file name.
237 const int kLibNameLen = FILENAME_MAX + 1;
238 char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
239
240 // This loop will terminate once the scanning hits an EOF.
241 while (true) {
242 uintptr_t start, end;
243 char attr_r, attr_w, attr_x, attr_p;
244 // Parse the addresses and permission bits at the beginning of the line.
245 if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
246 if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
247
248 int c;
249 if (attr_r == 'r' && attr_x == 'x') {
250 // Found a readable and executable entry. Skip characters until we reach
251 // the beginning of the filename or the end of the line.
252 do {
253 c = getc(fp);
254 } while ((c != EOF) && (c != '\n') && (c != '/'));
255 if (c == EOF) break; // EOF: Was unexpected, just exit.
256
257 // Process the filename if found.
258 if (c == '/') {
259 ungetc(c, fp); // Push the '/' back into the stream to be read below.
260
261 // Read to the end of the line. Exit if the read fails.
262 if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
263
264 // Drop the newline character read by fgets. We do not need to check
265 // for a zero-length string because we know that we at least read the
266 // '/' character.
267 lib_name[strlen(lib_name) - 1] = '\0';
268 } else {
269 // No library name found, just record the raw address range.
270 snprintf(lib_name, kLibNameLen,
271 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
272 }
273 LOG(SharedLibraryEvent(lib_name, start, end));
274 } else {
275 // Entry not describing executable data. Skip to end of line to setup
276 // reading the next entry.
277 do {
278 c = getc(fp);
279 } while ((c != EOF) && (c != '\n'));
280 if (c == EOF) break;
281 }
282 }
283 free(lib_name);
284 fclose(fp);
285 #endif
286 }
287
288
// Captures up to frames.length() return addresses from the current call
// stack and formats a textual description of each frame. Returns the
// number of frames captured, kStackWalkError on failure, or 0 where the
// glibc backtrace extension is unavailable.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
#ifdef __GLIBC__
  int frames_size = frames.length();
  void** addresses = NewArray<void*>(frames_size);

  int frames_count = backtrace(addresses, frames_size);

  char** symbols;
  symbols = backtrace_symbols(addresses, frames_count);
  if (symbols == NULL) {
    DeleteArray(addresses);
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  DeleteArray(addresses);
  free(symbols);  // backtrace_symbols allocates the array with malloc.

  return frames_count;
#else  // ndef __GLIBC__
  return 0;
#endif  // ndef __GLIBC__
}
323
324
// Constants used for mmap: anonymous mappings use fd -1 and offset 0.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
328
329
// Reserves (but does not commit) |size| bytes of address space. The
// region is mapped PROT_NONE; Commit() makes subranges accessible. On
// failure address_ is MAP_FAILED and IsReserved() returns false.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}
336
337
// Releases the reserved range. On success address_ is reset to
// MAP_FAILED so IsReserved() becomes false.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}
343
344
// True if the constructor's mmap reservation succeeded.
bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}
348
349
// Commits |size| bytes at |address| inside the reserved range by
// remapping them read/write (optionally executable) with MAP_FIXED.
// Returns false if the mmap call fails.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  // Committed pages may hold heap objects; widen the tracked envelope.
  UpdateAllocatedSpaceLimits(address, size);
  return true;
}
361
362
Uncommit(void * address,size_t size)363 bool VirtualMemory::Uncommit(void* address, size_t size) {
364 return mmap(address, size, PROT_NONE,
365 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
366 kMmapFd, kMmapFdOffset) != MAP_FAILED;
367 }
368
369
// Platform specific thread-handle data: wraps the pthread handle.
class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  // Sets the handle to the calling thread (SELF) or to the invalid
  // sentinel kNoThread (INVALID).
  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }

  pthread_t thread_;  // Thread handle for pthread.
};
385
386
// Allocates the platform data and initializes it for |kind|.
ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}
390
391
// Re-initializes an existing handle without reallocating platform data.
void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}
395
396
// Frees the platform specific data.
ThreadHandle::~ThreadHandle() {
  delete data_;
}
400
401
// True if this handle refers to the calling thread.
bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}
405
406
// True once the handle refers to a real thread (see kNoThread).
bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}
410
411
// A Thread starts out with an invalid handle; Start() fills it in.
Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}
414
415
// Note: does not join or terminate a still-running thread.
Thread::~Thread() {
}
418
419
// pthread entry-point trampoline: records the thread handle and invokes
// the Thread's virtual Run() method.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}
430
431
Start()432 void Thread::Start() {
433 pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
434 ASSERT(IsValid());
435 }
436
437
// Blocks until the thread's Run() method has returned.
void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}
441
442
// Creates a new thread-local storage key (no destructor callback).
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);  // Silences unused-variable warnings in release builds.
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}
450
451
// Deletes a key created by CreateThreadLocalKey.
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}
458
459
GetThreadLocal(LocalStorageKey key)460 void* Thread::GetThreadLocal(LocalStorageKey key) {
461 pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
462 return pthread_getspecific(pthread_key);
463 }
464
465
SetThreadLocal(LocalStorageKey key,void * value)466 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
467 pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
468 pthread_setspecific(pthread_key, value);
469 }
470
471
// Offers the rest of this thread's timeslice back to the scheduler.
void Thread::YieldCPU() {
  sched_yield();
}
475
476
// Mutex backed by a recursive pthread mutex, so the same thread may lock
// it multiple times without deadlocking.
class LinuxMutex : public Mutex {
 public:

  LinuxMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
  }

  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }

  // Both return the pthread error code (0 on success).
  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

 private:
  pthread_mutex_t mutex_;  // Pthread mutex for POSIX platforms.
};
505
506
// Factory for the platform mutex; the caller owns the returned object.
Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}
510
511
// Counting semaphore backed by an unnamed POSIX semaphore (shared
// between threads of this process only, pshared == 0).
class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);  // |timeout| is in microseconds.
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};
523
524
Wait()525 void LinuxSemaphore::Wait() {
526 while (true) {
527 int result = sem_wait(&sem_);
528 if (result == 0) return; // Successfully got semaphore.
529 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
530 }
531 }
532
533
// Converts a struct timeval (microseconds) into a struct timespec
// (nanoseconds); supplied by some C libraries but not all, so define a
// fallback.
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
    (ts)->tv_sec = (tv)->tv_sec; \
    (ts)->tv_nsec = (tv)->tv_usec * 1000; \
} while (false)
#endif
540
541
Wait(int timeout)542 bool LinuxSemaphore::Wait(int timeout) {
543 const long kOneSecondMicros = 1000000; // NOLINT
544
545 // Split timeout into second and nanosecond parts.
546 struct timeval delta;
547 delta.tv_usec = timeout % kOneSecondMicros;
548 delta.tv_sec = timeout / kOneSecondMicros;
549
550 struct timeval current_time;
551 // Get the current time.
552 if (gettimeofday(¤t_time, NULL) == -1) {
553 return false;
554 }
555
556 // Calculate time for end of timeout.
557 struct timeval end_time;
558 timeradd(¤t_time, &delta, &end_time);
559
560 struct timespec ts;
561 TIMEVAL_TO_TIMESPEC(&end_time, &ts);
562 // Wait for semaphore signalled or timeout.
563 while (true) {
564 int result = sem_timedwait(&sem_, &ts);
565 if (result == 0) return true; // Successfully got semaphore.
566 if (result > 0) {
567 // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
568 errno = result;
569 result = -1;
570 }
571 if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
572 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
573 }
574 }
575
576
// Factory for the platform semaphore; the caller owns the returned object.
Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}
580
581
582 #ifdef ENABLE_LOGGING_AND_PROFILING
583
// The single active sampler (only one SIGPROF handler can be installed
// process-wide) and the pthread handle of the thread running the VM.
static Sampler* active_sampler_ = NULL;
static pthread_t vm_thread_ = 0;
586
587
#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.

// Minimal ARM signal-context layout matching what the kernel delivers.
struct sigcontext {
  uint32_t trap_no;
  uint32_t error_code;
  uint32_t oldmask;
  uint32_t gregs[16];
  uint32_t arm_cpsr;
  uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext *uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
// General-register indices used below: R15 = pc, R13 = sp, R11 = fp.
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};

#endif
612
613
// A function that determines if a signal handler is called in the context
// of a VM thread.
//
// The problem is that SIGPROF signal can be delivered to an arbitrary thread
// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2)
// So, if the signal is being handled in the context of a non-VM thread,
// it means that the VM thread is running, and trying to sample its stack can
// cause a crash.
//
// Called from ProfilerSignalHandler, i.e. in signal context.
static inline bool IsVmThread() {
  // In the case of a single VM thread, this check is enough.
  if (pthread_equal(pthread_self(), vm_thread_)) return true;
  // If there are multiple threads that use VM, they must have a thread id
  // stored in TLS. To verify that the thread is really executing VM,
  // we check Top's data. Having that ThreadManager::RestoreThread first
  // restores ThreadLocalTop from TLS, and only then erases the TLS value,
  // reading Top::thread_id() should not be affected by races.
  if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
      ThreadManager::CurrentId() == Top::thread_id()) {
    return true;
  }
  return false;
}
636
637
// SIGPROF handler: builds a TickSample — pc/sp/fp extracted from the
// interrupted thread's ucontext when profiling, plus the current VM
// state — and hands it to the active sampler. Runs in signal context.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  if (active_sampler_ == NULL) return;

  TickSample sample;

  // If profiling, we extract the current pc and sp.
  if (active_sampler_->IsProfiling()) {
    // Extracting the sample from the context is extremely machine dependent.
    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
    mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
    sample.pc = mcontext.gregs[REG_EIP];
    sample.sp = mcontext.gregs[REG_ESP];
    sample.fp = mcontext.gregs[REG_EBP];
#elif V8_HOST_ARCH_X64
    sample.pc = mcontext.gregs[REG_RIP];
    sample.sp = mcontext.gregs[REG_RSP];
    sample.fp = mcontext.gregs[REG_RBP];
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    sample.pc = mcontext.gregs[R15];
    sample.sp = mcontext.gregs[R13];
    sample.fp = mcontext.gregs[R11];
#else
    sample.pc = mcontext.arm_pc;
    sample.sp = mcontext.arm_sp;
    sample.fp = mcontext.arm_fp;
#endif
#endif
    // Only sample the stack when the signal landed on the VM thread;
    // sampling from a foreign thread can crash (see IsVmThread above).
    if (IsVmThread())
      active_sampler_->SampleStack(&sample);
  }

  // We always sample the VM state.
  sample.state = Logger::state();

  active_sampler_->Tick(&sample);
}
679
680
// Platform data for the sampler: remembers the previous SIGPROF handler
// and itimer so Stop() can restore them.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() {
    signal_handler_installed_ = false;
  }

  bool signal_handler_installed_;        // True while our handler is active.
  struct sigaction old_signal_handler_;  // Handler to restore on Stop().
  struct itimerval old_timer_value_;     // Timer to restore on Stop().
};
691
692
// |interval| is the tick period in milliseconds; |profiling| enables
// stack sampling in addition to VM-state ticks. Start() activates it.
Sampler::Sampler(int interval, bool profiling)
    : interval_(interval), profiling_(profiling), active_(false) {
  data_ = new PlatformData();
}
697
698
// Frees the platform data (Stop() should have been called beforehand).
Sampler::~Sampler() {
  delete data_;
}
702
703
// Installs the SIGPROF handler and starts the profiling interval timer.
void Sampler::Start() {
  // There can only be one active sampler at the time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;

  // Remember which thread runs the VM so the signal handler can tell
  // whether stack sampling is safe (see IsVmThread).
  vm_thread_ = pthread_self();

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Set the itimer to generate a tick for each interval.
  itimerval itimer;
  itimer.it_interval.tv_sec = interval_ / 1000;
  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);

  // Set this sampler as the active sampler.
  active_sampler_ = this;
  active_ = true;
}
731
732
// Restores the previous itimer and SIGPROF handler and deactivates this
// sampler so another one may be started.
void Sampler::Stop() {
  // Restore old signal handler
  if (data_->signal_handler_installed_) {
    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
  active_ = false;
}
745
746
747 #endif // ENABLE_LOGGING_AND_PROFILING
748
749 } } // namespace v8::internal
750