// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This module contains the platform-specific code. This makes the rest of
// the code less dependent on operating systems, compilers and runtime
// libraries. This module specifically does not deal with differences
// between processor architectures.
// The platform classes have the same definition for all platforms. The
// implementation for a particular platform is put in platform_<os>.cc.
// The build system then uses the implementation for the target platform.
//
// This design has been chosen because it is simple and fast. Alternatively,
// the platform-dependent classes could have been implemented using abstract
// superclasses with virtual methods and specializations for each platform.
// This design was rejected because it was more complicated and slower. It
// would require factory methods for selecting the right implementation and
// the overhead of virtual methods for performance-sensitive operations such
// as mutex locking/unlocking.

#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_

#define V8_INFINITY INFINITY

// Windows-specific stuff.
#ifdef WIN32

// Microsoft Visual C++ specific stuff.
#ifdef _MSC_VER

enum {
  FP_NAN,
  FP_INFINITE,
  FP_ZERO,
  FP_SUBNORMAL,
  FP_NORMAL
};

#undef V8_INFINITY
#define V8_INFINITY HUGE_VAL

namespace v8 {
namespace internal {
int isfinite(double x);
} }
int isnan(double x);
int isinf(double x);
int isless(double x, double y);
int isgreater(double x, double y);
int fpclassify(double x);
int signbit(double x);

int strncasecmp(const char* s1, const char* s2, int n);

#endif  // _MSC_VER

// random() is missing on both Visual Studio and MinGW.
int random();

#endif  // WIN32


#ifdef __sun
# ifndef signbit
int signbit(double x);
# endif
#endif


// GCC-specific stuff.
#ifdef __GNUC__

// Needed for va_list on at least MinGW and Android.
#include <stdarg.h>

#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)

// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
// For now, we use the more involved template-based version from <limits>, but
// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x).
// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro.
#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
#include <limits>
#undef V8_INFINITY
#define V8_INFINITY std::numeric_limits<double>::infinity()
#endif

#endif  // __GNUC__

#include "atomicops.h"
#include "platform-tls.h"
#include "utils.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

// Use AtomicWord for a machine-sized pointer. It is assumed that
// reads and writes of naturally aligned values of this type are atomic.
typedef intptr_t AtomicWord;

class Semaphore;
class Mutex;

double ceiling(double x);
double modulo(double x, double y);

// Forward declarations.
class Socket;

// ----------------------------------------------------------------------------
// OS
//
// This class has static methods for the different platform-specific
// functions. Add methods here to cope with differences between the
// supported platforms.

class OS {
 public:
  // Initializes the platform OS support. Called once at VM startup.
  static void Setup();

  // Returns the accumulated user time for the thread. This routine
  // can be used for profiling. The implementation should
  // strive for high-precision timer resolution, preferably
  // micro-second resolution.
  static int GetUserTime(uint32_t* secs, uint32_t* usecs);

  // Get a tick counter normalized to one tick per microsecond.
  // Used for calculating time intervals.
  static int64_t Ticks();
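
  // Usage sketch: one way an interval can be measured with Ticks(), assuming
  // the counter is monotonic over the measured span.
  //
  //   int64_t start = OS::Ticks();
  //   // ... work to be timed ...
  //   int64_t elapsed_microseconds = OS::Ticks() - start;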

  // Returns the current time as the number of milliseconds since
  // 00:00:00 UTC, January 1, 1970.
  static double TimeCurrentMillis();

  // Returns a string identifying the current time zone. The
  // timestamp is used for determining if DST is in effect.
  static const char* LocalTimezone(double time);

  // Returns the local time offset in milliseconds east of UTC without
  // taking daylight saving time into account.
  static double LocalTimeOffset();

  // Returns the daylight saving offset for the given time.
  static double DaylightSavingsOffset(double time);

  // Returns the last OS error.
  static int GetLastError();

  static FILE* FOpen(const char* path, const char* mode);
  static bool Remove(const char* path);

  // The log file open mode is platform-dependent due to line-ending issues.
  static const char* const LogFileOpenMode;

  // Print output to console. This is mostly used for debugging output.
  // On platforms that have standard terminal output, the output
  // should go to stdout.
  static void Print(const char* format, ...);
  static void VPrint(const char* format, va_list args);

  // Print output to a file. This is mostly used for debugging output.
  static void FPrint(FILE* out, const char* format, ...);
  static void VFPrint(FILE* out, const char* format, va_list args);

  // Print error output to console. This is mostly used for error message
  // output. On platforms that have standard terminal output, the output
  // should go to stderr.
  static void PrintError(const char* format, ...);
  static void VPrintError(const char* format, va_list args);

  // Allocate/Free memory used by the JS heap. Pages are readable/writable,
  // but they are not guaranteed to be executable unless 'is_executable' is
  // true. Returns the address of allocated memory, or NULL on failure.
  static void* Allocate(const size_t requested,
                        size_t* allocated,
                        bool is_executable);
  static void Free(void* address, const size_t size);
  // Get the alignment guaranteed by Allocate().
  static size_t AllocateAlignment();

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static void Protect(void* address, size_t size);
  static void Unprotect(void* address, size_t size, bool is_executable);
#endif

  // Returns an indication of whether a pointer is in a space that
  // has been allocated by Allocate(). This method may conservatively
  // always return false, but giving more accurate information may
  // improve the robustness of the stack dump code in the presence of
  // heap corruption.
  static bool IsOutsideAllocatedSpace(void* pointer);

  // Sleep for a number of milliseconds.
  static void Sleep(const int milliseconds);

  // Abort the current process.
  static void Abort();

  // Debug break.
  static void DebugBreak();

  // Walk the stack.
  static const int kStackWalkError = -1;
  static const int kStackWalkMaxNameLen = 256;
  static const int kStackWalkMaxTextLen = 256;
  struct StackFrame {
    void* address;
    char text[kStackWalkMaxTextLen];
  };

  static int StackWalk(Vector<StackFrame> frames);

  // Factory method for creating a platform-dependent Mutex.
  // Please use delete to reclaim the storage for the returned Mutex.
  static Mutex* CreateMutex();

  // Factory method for creating a platform-dependent Semaphore.
  // Please use delete to reclaim the storage for the returned Semaphore.
  static Semaphore* CreateSemaphore(int count);

  // Factory method for creating a platform-dependent Socket.
  // Please use delete to reclaim the storage for the returned Socket.
  static Socket* CreateSocket();

  class MemoryMappedFile {
   public:
    static MemoryMappedFile* open(const char* name);
    static MemoryMappedFile* create(const char* name, int size, void* initial);
    virtual ~MemoryMappedFile() { }
    virtual void* memory() = 0;
    virtual int size() = 0;
  };

  // Safe formatting print. Ensures that str is always null-terminated.
  // Returns the number of chars written, or -1 if output was truncated.
  static int SNPrintF(Vector<char> str, const char* format, ...);
  static int VSNPrintF(Vector<char> str,
                       const char* format,
                       va_list args);

  static char* StrChr(char* str, int c);
  static void StrNCpy(Vector<char> dest, const char* src, size_t n);
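
  // Usage sketch for the formatting helpers above, assuming the
  // EmbeddedVector type from utils.h:
  //
  //   EmbeddedVector<char, 128> buffer;
  //   int written = OS::SNPrintF(buffer, "value: %d", 42);
  //   if (written == -1) {
  //     // Output did not fit; buffer is still null-terminated.
  //   }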

  // Support for the profiler. Can do nothing, in which case ticks
  // occurring in shared libraries will not be properly accounted for.
  static void LogSharedLibraryAddresses();

  // Support for the profiler. Notifies the external profiling
  // process that a code moving garbage collection starts. Can do
  // nothing, in which case the code objects must not move (e.g., by
  // using --never-compact) if accurate profiling is desired.
  static void SignalCodeMovingGC();

  // The return value indicates the CPU features we are sure of because of the
  // OS. For example, MacOSX doesn't run on any x86 CPUs that don't have SSE2
  // instructions.
  // This is a little messy because the interpretation depends on the
  // combination of CPU and OS. The bits in the answer correspond to the bit
  // positions indicated by the members of the CpuFeature enum from globals.h.
  static uint64_t CpuFeaturesImpliedByPlatform();

  // Returns the double constant NAN.
  static double nan_value();

  // Support runtime detection of VFP3 on ARM CPUs.
  static bool ArmCpuHasFeature(CpuFeature feature);

  // Support runtime detection of FPU on MIPS CPUs.
  static bool MipsCpuHasFeature(CpuFeature feature);

  // Returns the activation frame alignment constraint or zero if
  // the platform doesn't care. Guaranteed to be a power of two.
  static int ActivationFrameAlignment();

  static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);

#if defined(V8_TARGET_ARCH_IA32)
  // Copy memory area to disjoint memory area.
  static void MemCopy(void* dest, const void* src, size_t size);
  // Limit below which the extra overhead of the MemCopy function is likely
  // to outweigh the benefits of faster copying.
  static const int kMinComplexMemCopy = 64;
  typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);

#else  // V8_TARGET_ARCH_IA32
  static void MemCopy(void* dest, const void* src, size_t size) {
    memcpy(dest, src, size);
  }
  static const int kMinComplexMemCopy = 256;
#endif  // V8_TARGET_ARCH_IA32

 private:
  static const int msPerSecond = 1000;

  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};


class VirtualMemory {
 public:
  // Reserves virtual memory with the given size.
  explicit VirtualMemory(size_t size);
  ~VirtualMemory();

  // Returns whether the memory has been reserved.
  bool IsReserved();

  // Returns the start address of the reserved memory.
  void* address() {
    ASSERT(IsReserved());
    return address_;
  }

  // Returns the size of the reserved memory.
  size_t size() { return size_; }

  // Commits real memory. Returns whether the operation succeeded.
  bool Commit(void* address, size_t size, bool is_executable);

  // Uncommit real memory. Returns whether the operation succeeded.
  bool Uncommit(void* address, size_t size);

 private:
  void* address_;  // Start address of the virtual memory.
  size_t size_;  // Size of the virtual memory.
};
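

// Usage sketch for VirtualMemory: reserve an address range up front and
// commit pages lazily. The 1 * MB reservation size and the use of
// OS::AllocateAlignment() as the commit granularity are illustrative
// assumptions only.
//
//   VirtualMemory reservation(1 * MB);
//   if (reservation.IsReserved()) {
//     void* start = reservation.address();
//     if (reservation.Commit(start, OS::AllocateAlignment(), false)) {
//       // The first chunk is now readable/writable (not executable).
//       reservation.Uncommit(start, OS::AllocateAlignment());
//     }
//   }  // The reservation is released when 'reservation' goes out of scope.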


// ----------------------------------------------------------------------------
// Thread
//
// Thread objects are used for creating and running threads. When the start()
// method is called the new thread starts running the run() method in the new
// thread. The Thread object should not be deallocated before the thread has
// terminated.

class Thread {
 public:
  // Opaque data type for thread-local storage keys.
  // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
  // to ensure that the enumeration type has the correct value range (see
  // Issue 830 for more details).
  enum LocalStorageKey {
    LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
    LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
  };

  struct Options {
    Options() : name("v8:<unknown>"), stack_size(0) {}

    const char* name;
    int stack_size;
  };

  // Create a new thread (with a value for storing in the TLS isolate field).
  Thread(Isolate* isolate, const Options& options);
  Thread(Isolate* isolate, const char* name);
  virtual ~Thread();

  // Start the new thread by calling the Run() method in the new thread.
  void Start();

  // Wait until the thread terminates.
  void Join();

  inline const char* name() const {
    return name_;
  }

  // Abstract method for the run handler.
  virtual void Run() = 0;

  // Thread-local storage.
  static LocalStorageKey CreateThreadLocalKey();
  static void DeleteThreadLocalKey(LocalStorageKey key);
  static void* GetThreadLocal(LocalStorageKey key);
  static int GetThreadLocalInt(LocalStorageKey key) {
    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
  }
  static void SetThreadLocal(LocalStorageKey key, void* value);
  static void SetThreadLocalInt(LocalStorageKey key, int value) {
    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
  }
  static bool HasThreadLocal(LocalStorageKey key) {
    return GetThreadLocal(key) != NULL;
  }

#ifdef V8_FAST_TLS_SUPPORTED
  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    void* result = reinterpret_cast<void*>(
        InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
    ASSERT(result == GetThreadLocal(key));
    return result;
  }
#else
  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    return GetThreadLocal(key);
  }
#endif

  // A hint to the scheduler to let another thread run.
  static void YieldCPU();

  Isolate* isolate() const { return isolate_; }

  // The thread name length is limited to 16 based on Linux's implementation
  // of prctl().
  static const int kMaxThreadNameLength = 16;

  class PlatformData;
  PlatformData* data() { return data_; }

 private:
  void set_name(const char* name);

  PlatformData* data_;

  Isolate* isolate_;
  char name_[kMaxThreadNameLength];
  int stack_size_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};


// ----------------------------------------------------------------------------
// Mutex
//
// Mutexes are used for serializing access to non-reentrant sections of code.
// The implementations of mutex should allow for nested/recursive locking.

class Mutex {
 public:
  virtual ~Mutex() {}

  // Locks the given mutex. If the mutex is currently unlocked, it becomes
  // locked and owned by the calling thread, and the call returns immediately.
  // If the mutex is already locked by another thread, suspends the calling
  // thread until the mutex is unlocked.
  virtual int Lock() = 0;

  // Unlocks the given mutex. The mutex is assumed to be locked and owned by
  // the calling thread on entrance.
  virtual int Unlock() = 0;

  // Tries to lock the given mutex. Returns whether the mutex was
  // successfully locked.
  virtual bool TryLock() = 0;
};


// ----------------------------------------------------------------------------
// ScopedLock
//
// Stack-allocated ScopedLocks provide block-scoped locking and
// unlocking of a mutex.
class ScopedLock {
 public:
  explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
    ASSERT(mutex_ != NULL);
    mutex_->Lock();
  }
  ~ScopedLock() {
    mutex_->Unlock();
  }

 private:
  Mutex* mutex_;
  DISALLOW_COPY_AND_ASSIGN(ScopedLock);
};
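

// Usage sketch: guarding a critical section with a ScopedLock, assuming a
// Mutex obtained from OS::CreateMutex() that outlives the scope.
//
//   Mutex* mutex = OS::CreateMutex();
//   {
//     ScopedLock lock(mutex);
//     // ... access state shared between threads ...
//   }  // The mutex is unlocked when 'lock' goes out of scope.
//   delete mutex;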


// ----------------------------------------------------------------------------
// Semaphore
//
// A semaphore object is a synchronization object that maintains a count. The
// count is decremented each time a thread completes a wait for the semaphore
// object and incremented each time a thread signals the semaphore. When the
// count reaches zero, threads waiting for the semaphore block until the
// count becomes non-zero.

class Semaphore {
 public:
  virtual ~Semaphore() {}

  // Suspends the calling thread until the semaphore counter is non-zero
  // and then decrements the semaphore counter.
  virtual void Wait() = 0;

  // Suspends the calling thread until the counter is non-zero or the timeout
  // time has passed. If a timeout happens the return value is false and the
  // counter is unchanged. Otherwise the semaphore counter is decremented and
  // true is returned. The timeout value is specified in microseconds.
  virtual bool Wait(int timeout) = 0;

  // Increments the semaphore counter.
  virtual void Signal() = 0;
};
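

// Usage sketch: signalling completion of background work with a Semaphore,
// assuming a semaphore created via OS::CreateSemaphore(0). The waiting
// thread blocks until the worker signals.
//
//   Semaphore* done = OS::CreateSemaphore(0);
//   // Worker thread, when finished:   done->Signal();
//   // Waiting thread:                 done->Wait();
//   delete done;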


// ----------------------------------------------------------------------------
// Socket
//

class Socket {
 public:
  virtual ~Socket() {}

  // Server initialization.
  virtual bool Bind(const int port) = 0;
  virtual bool Listen(int backlog) const = 0;
  virtual Socket* Accept() const = 0;

  // Client initialization.
  virtual bool Connect(const char* host, const char* port) = 0;

  // Shutdown socket for both read and write. This causes blocking Send and
  // Receive calls to exit. After Shutdown the Socket object cannot be used
  // for any communication.
  virtual bool Shutdown() = 0;

  // Data transmission.
  virtual int Send(const char* data, int len) const = 0;
  virtual int Receive(char* data, int len) const = 0;

  // Set the value of the SO_REUSEADDR socket option.
  virtual bool SetReuseAddress(bool reuse_address) = 0;

  virtual bool IsValid() const = 0;

  static bool Setup();
  static int LastError();
  static uint16_t HToN(uint16_t value);
  static uint16_t NToH(uint16_t value);
  static uint32_t HToN(uint32_t value);
  static uint32_t NToH(uint32_t value);
};


// ----------------------------------------------------------------------------
// Sampler
//
// A sampler periodically samples the state of the VM and optionally
// (if used for profiling) the program counter and stack pointer for
// the thread that created it.

// TickSample captures the information collected for each sample.
class TickSample {
 public:
  TickSample()
      : state(OTHER),
        pc(NULL),
        sp(NULL),
        fp(NULL),
        tos(NULL),
        frames_count(0),
        has_external_callback(false) {}
  StateTag state;  // The state of the VM.
  Address pc;  // Instruction pointer.
  Address sp;  // Stack pointer.
  Address fp;  // Frame pointer.
  union {
    Address tos;  // Top stack value (*sp).
    Address external_callback;
  };
  static const int kMaxFramesCount = 64;
  Address stack[kMaxFramesCount];  // Call stack.
  int frames_count : 8;  // Number of captured frames.
  bool has_external_callback : 1;
};

#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler {
 public:
  // Initialize the sampler.
  Sampler(Isolate* isolate, int interval);
  virtual ~Sampler();

  int interval() const { return interval_; }

  // Performs stack sampling.
  void SampleStack(TickSample* sample) {
    DoSampleStack(sample);
    IncSamplesTaken();
  }

  // This method is called for each sampling period with the current
  // program counter.
  virtual void Tick(TickSample* sample) = 0;

  // Start and stop the sampler.
  void Start();
  void Stop();

  // Is the sampler used for profiling?
  bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
  void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
  void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }

  // Whether the sampler is running (that is, consumes resources).
  bool IsActive() const { return NoBarrier_Load(&active_); }

  Isolate* isolate() { return isolate_; }

  // Used in tests to make sure that stack sampling is performed.
  int samples_taken() const { return samples_taken_; }
  void ResetSamplesTaken() { samples_taken_ = 0; }

  class PlatformData;
  PlatformData* data() { return data_; }

  PlatformData* platform_data() { return data_; }

 protected:
  virtual void DoSampleStack(TickSample* sample) = 0;

 private:
  void SetActive(bool value) { NoBarrier_Store(&active_, value); }
  void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }

  Isolate* isolate_;
  const int interval_;
  Atomic32 profiling_;
  Atomic32 active_;
  PlatformData* data_;  // Platform specific data.
  int samples_taken_;  // Counts stack samples taken.
  DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};


#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal

#endif  // V8_PLATFORM_H_