1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // Platform-specific code for POSIX goes here. This is not a platform on its
6 // own, but contains the parts which are the same across the POSIX platforms
7 // Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
8 
9 #include <errno.h>
10 #include <limits.h>
11 #include <pthread.h>
12 #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
13 #include <pthread_np.h>  // for pthread_set_name_np
14 #endif
15 #include <fcntl.h>
16 #include <sched.h>  // for sched_yield
17 #include <stdio.h>
18 #include <sys/mman.h>
19 #include <sys/stat.h>
20 #include <sys/time.h>
21 #include <sys/types.h>
22 #include <time.h>
23 #include <unistd.h>
24 #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
25     defined(__NetBSD__) || defined(__OpenBSD__)
26 #include <sys/sysctl.h>  // for sysctl
27 #endif
28 
29 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
30 #define LOG_TAG "v8"
31 #include <android/log.h>
32 #endif
33 
34 #include <cmath>
35 #include <cstdlib>
36 
37 #include "src/base/platform/platform-posix.h"
38 
39 #include "src/base/lazy-instance.h"
40 #include "src/base/macros.h"
41 #include "src/base/platform/platform.h"
42 #include "src/base/platform/time.h"
43 #include "src/base/utils/random-number-generator.h"
44 
45 #ifdef V8_FAST_TLS_SUPPORTED
46 #include <atomic>
47 #endif
48 
49 #if V8_OS_DARWIN || V8_OS_LINUX
50 #include <dlfcn.h>  // for dlsym
51 #endif
52 
53 #if V8_OS_DARWIN
54 #include <mach/mach.h>
55 #endif
56 
57 #if V8_OS_LINUX
58 #include <sys/prctl.h>  // for prctl
59 #endif
60 
61 #if defined(V8_OS_FUCHSIA)
62 #include <zircon/process.h>
63 #else
64 #include <sys/resource.h>
65 #endif
66 
67 #if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
68 #include <sys/syscall.h>
69 #endif
70 
71 #if V8_OS_FREEBSD || V8_OS_DARWIN || V8_OS_OPENBSD || V8_OS_SOLARIS
72 #define MAP_ANONYMOUS MAP_ANON
73 #endif
74 
75 #if defined(V8_OS_SOLARIS)
76 #if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
77 extern "C" int madvise(caddr_t, size_t, int);
78 #else
79 extern int madvise(caddr_t, size_t, int);
80 #endif
81 #endif
82 
83 #ifndef MADV_FREE
84 #define MADV_FREE MADV_DONTNEED
85 #endif
86 
87 #if defined(V8_LIBC_GLIBC)
88 extern "C" void* __libc_stack_end;
89 #endif
90 
91 namespace v8 {
92 namespace base {
93 
94 namespace {
95 
96 // 0 is never a valid thread id.
97 const pthread_t kNoThread = static_cast<pthread_t>(0);
98 
99 bool g_hard_abort = false;
100 
101 const char* g_gc_fake_mmap = nullptr;
102 
103 DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
104                                 GetPlatformRandomNumberGenerator)
105 static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
106 
107 #if !V8_OS_FUCHSIA
108 #if V8_OS_DARWIN
109 // kMmapFd is used to pass vm_alloc flags to tag the region with the user
110 // defined tag 255. This helps identify V8-allocated regions in memory analysis
111 // tools like vmmap(1).
112 const int kMmapFd = VM_MAKE_TAG(255);
113 #else   // !V8_OS_DARWIN
114 const int kMmapFd = -1;
115 #endif  // !V8_OS_DARWIN
116 
117 #if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64
118 // During snapshot generation in cross builds, sysconf() runs on the Intel
119 // host and returns host page size, while the snapshot needs to use the
120 // target page size.
121 constexpr int kAppleArmPageSize = 1 << 14;
122 #endif
123 
124 const int kMmapFdOffset = 0;
125 
126 // TODO(v8:10026): Add the right permission flag to make executable pages
127 // guarded.
128 int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
129   switch (access) {
130     case OS::MemoryPermission::kNoAccess:
131     case OS::MemoryPermission::kNoAccessWillJitLater:
132       return PROT_NONE;
133     case OS::MemoryPermission::kRead:
134       return PROT_READ;
135     case OS::MemoryPermission::kReadWrite:
136       return PROT_READ | PROT_WRITE;
137     case OS::MemoryPermission::kReadWriteExecute:
138       return PROT_READ | PROT_WRITE | PROT_EXEC;
139     case OS::MemoryPermission::kReadExecute:
140       return PROT_READ | PROT_EXEC;
141   }
142   UNREACHABLE();
143 }
144 
145 enum class PageType { kShared, kPrivate };
146 
147 int GetFlagsForMemoryPermission(OS::MemoryPermission access,
148                                 PageType page_type) {
149   int flags = MAP_ANONYMOUS;
150   flags |= (page_type == PageType::kShared) ? MAP_SHARED : MAP_PRIVATE;
151   if (access == OS::MemoryPermission::kNoAccess) {
152 #if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
153     flags |= MAP_NORESERVE;
154 #endif  // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
155 #if V8_OS_QNX
156     flags |= MAP_LAZY;
157 #endif  // V8_OS_QNX
158   }
159 #if V8_OS_DARWIN
160   // MAP_JIT is required to obtain writable and executable pages when the
161   // hardened runtime/memory protection is enabled, which is optional (via code
162   // signing) on Intel-based Macs but mandatory on Apple silicon ones. See also
163   // https://developer.apple.com/documentation/apple-silicon/porting-just-in-time-compilers-to-apple-silicon.
164   if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
165     flags |= MAP_JIT;
166   }
167 #endif  // V8_OS_DARWIN
168   return flags;
169 }
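// Illustration (added commentary, not part of the original file): on Linux, a
// private reservation with kNoAccess resolves to MAP_ANONYMOUS | MAP_PRIVATE |
// MAP_NORESERVE, i.e. address space is claimed but no swap is committed, while
// a shared kReadWrite mapping resolves to MAP_ANONYMOUS | MAP_SHARED.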
170 
171 void* Allocate(void* hint, size_t size, OS::MemoryPermission access,
172                PageType page_type) {
173   int prot = GetProtectionFromMemoryPermission(access);
174   int flags = GetFlagsForMemoryPermission(access, page_type);
175   void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset);
176   if (result == MAP_FAILED) return nullptr;
177 #if ENABLE_HUGEPAGE
178   if (result != nullptr && size >= kHugePageSize) {
179     const uintptr_t huge_start =
180         RoundUp(reinterpret_cast<uintptr_t>(result), kHugePageSize);
181     const uintptr_t huge_end =
182         RoundDown(reinterpret_cast<uintptr_t>(result) + size, kHugePageSize);
183     if (huge_end > huge_start) {
184       // The check above bails out when the aligned addresses do not
185       // provide a block of at least kHugePageSize size.
186       madvise(reinterpret_cast<void*>(huge_start), huge_end - huge_start,
187               MADV_HUGEPAGE);
188     }
189   }
190 #endif
191 
192   return result;
193 }
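// Note (added commentary): the MADV_HUGEPAGE hint above is best-effort. Only
// the kHugePageSize-aligned sub-range of the fresh mapping is hinted, and a
// failing madvise() is deliberately ignored -- the mapping is returned to the
// caller either way.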
194 
195 #endif  // !V8_OS_FUCHSIA
196 
197 }  // namespace
198 
199 #if V8_OS_LINUX || V8_OS_FREEBSD
200 #ifdef __arm__
201 
202 bool OS::ArmUsingHardFloat() {
203   // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
204   // the Floating Point ABI used (PCS stands for Procedure Call Standard).
205   // We use these as well as a couple of other defines to statically determine
206   // what FP ABI is used.
207   // GCC versions 4.4 and below don't support hard-fp.
208   // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
209   // __ARM_PCS_VFP.
210 
211 #define GCC_VERSION \
212   (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
213 #if GCC_VERSION >= 40600 && !defined(__clang__)
214 #if defined(__ARM_PCS_VFP)
215   return true;
216 #else
217   return false;
218 #endif
219 
220 #elif GCC_VERSION < 40500 && !defined(__clang__)
221   return false;
222 
223 #else
224 #if defined(__ARM_PCS_VFP)
225   return true;
226 #elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
227     !defined(__VFP_FP__)
228   return false;
229 #else
230 #error \
231     "Your compiler does not report the FP ABI it is compiled for. "      \
232        "Please report it on this issue: "                                    \
233        "http://code.google.com/p/v8/issues/detail?id=2140"
234 
235 #endif
236 #endif
237 #undef GCC_VERSION
238 }
239 
240 #endif  // def __arm__
241 #endif
242 
243 void PosixInitializeCommon(bool hard_abort, const char* const gc_fake_mmap) {
244   g_hard_abort = hard_abort;
245   g_gc_fake_mmap = gc_fake_mmap;
246 }
247 
248 #if !V8_OS_FUCHSIA
249 void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
250   PosixInitializeCommon(hard_abort, gc_fake_mmap);
251 }
252 #endif  // !V8_OS_FUCHSIA
253 
254 int OS::ActivationFrameAlignment() {
255 #if V8_TARGET_ARCH_ARM
256   // On EABI ARM targets this is required for fp correctness in the
257   // runtime system.
258   return 8;
259 #elif V8_TARGET_ARCH_MIPS
260   return 8;
261 #elif V8_TARGET_ARCH_S390
262   return 8;
263 #else
264   // Otherwise we just assume 16 byte alignment, i.e.:
265   // - With gcc 4.4 the tree vectorization optimizer can generate code
266   //   that requires 16 byte alignment such as movdqa on x86.
267   // - Mac OS X, PPC and Solaris (64-bit) activation frames must
268   //   be 16 byte-aligned;  see "Mac OS X ABI Function Call Guide"
269   return 16;
270 #endif
271 }
272 
273 // static
274 size_t OS::AllocatePageSize() {
275 #if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64
276   return kAppleArmPageSize;
277 #else
278   static size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
279   return page_size;
280 #endif
281 }
282 
283 // static
284 size_t OS::CommitPageSize() {
285   // Commit and allocate page size are the same on posix.
286   return OS::AllocatePageSize();
287 }
288 
289 // static
290 void OS::SetRandomMmapSeed(int64_t seed) {
291   if (seed) {
292     MutexGuard guard(rng_mutex.Pointer());
293     GetPlatformRandomNumberGenerator()->SetSeed(seed);
294   }
295 }
296 
297 // static
298 void* OS::GetRandomMmapAddr() {
299   uintptr_t raw_addr;
300   {
301     MutexGuard guard(rng_mutex.Pointer());
302     GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
303   }
304 #if V8_HOST_ARCH_ARM64
305 #if defined(V8_TARGET_OS_MACOS)
306   DCHECK_EQ(1 << 14, AllocatePageSize());
307 #endif
308   // Keep the address page-aligned; AArch64 supports 4K, 16K and 64K
309   // configurations.
310   raw_addr = RoundDown(raw_addr, AllocatePageSize());
311 #endif
312 #if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
313     defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
314   // If random hint addresses interfere with address ranges hard coded in
315   // sanitizers, bad things happen. This address range is copied from TSAN
316   // source but works with all tools.
317   // See crbug.com/539863.
318   raw_addr &= 0x007fffff0000ULL;
319   raw_addr += 0x7e8000000000ULL;
320 #else
321 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
322   // Currently available CPUs have 48 bits of virtual addressing.  Truncate
323   // the hint address to 46 bits to give the kernel a fighting chance of
324   // fulfilling our placement request.
325   raw_addr &= uint64_t{0x3FFFFFFFF000};
326 #elif V8_TARGET_ARCH_PPC64
327 #if V8_OS_AIX
328   // AIX: 64 bits of virtual addressing, but we limit the address range to
329   //   a) minimize Segment Lookaside Buffer (SLB) misses and
330   raw_addr &= uint64_t{0x3FFFF000};
331   //   b) use extra address space to isolate the mmap regions.
332   raw_addr += uint64_t{0x400000000000};
333 #elif V8_TARGET_BIG_ENDIAN
334   // Big-endian Linux: 42 bits of virtual addressing.
335   raw_addr &= uint64_t{0x03FFFFFFF000};
336 #else
337   // Little-endian Linux: 46 bits of virtual addressing.
338   raw_addr &= uint64_t{0x3FFFFFFF0000};
339 #endif
340 #elif V8_TARGET_ARCH_S390X
341   // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42
342   // bits of virtual addressing.  Truncate to 40 bits to give the kernel a
343   // chance to fulfill the request.
344   raw_addr &= uint64_t{0xFFFFFFF000};
345 #elif V8_TARGET_ARCH_S390
346   // 31 bits of virtual addressing.  Truncate to 29 bits to give the kernel a
347   // chance to fulfill the request.
348   raw_addr &= 0x1FFFF000;
349 #elif V8_TARGET_ARCH_MIPS64
350   // 42 bits of virtual addressing. Truncate to 40 bits to give the kernel a
351   // chance to fulfill the request.
352   raw_addr &= uint64_t{0xFFFFFF0000};
353 #elif V8_TARGET_ARCH_RISCV64
354   // TODO(RISCV): We need more information from the kernel to correctly mask
355   // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
356   raw_addr &= uint64_t{0xFFFFFF0000};
357 #elif V8_TARGET_ARCH_LOONG64
358   // 42 bits of virtual addressing. Truncate to 40 bits to give the kernel a
359   // chance to fulfill the request.
360   raw_addr &= uint64_t{0xFFFFFF0000};
361 #else
362   raw_addr &= 0x3FFFF000;
363 
364 #ifdef __sun
365   // For our Solaris/illumos mmap hint, we pick a random address in the bottom
366   // half of the top half of the address space (that is, the third quarter).
367   // Because we do not MAP_FIXED, this will be treated only as a hint -- the
368   // system will not fail to mmap() because something else happens to already
369   // be mapped at our random address. We deliberately set the hint high enough
370   // to get well above the system's break (that is, the heap); Solaris and
371   // illumos will try the hint and if that fails allocate as if there were
372   // no hint at all. The high hint prevents the break from getting hemmed in
373   // at low values, ceding half of the address space to the system heap.
374   raw_addr += 0x80000000;
375 #elif V8_OS_AIX
376   // The range 0x30000000 - 0xD0000000 is available on AIX;
377   // choose the upper range.
378   raw_addr += 0x90000000;
379 #else
380   // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
381   // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
382   // 10.6 and 10.7.
383   raw_addr += 0x20000000;
384 #endif
385 #endif
386 #endif
387   return reinterpret_cast<void*>(raw_addr);
388 }
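// Sketch of the non-sanitizer x64/arm64 path above (added commentary): masking
// the random value with uint64_t{0x3FFFFFFFF000} clears bits 0-11 (so the hint
// is page-aligned) and bits 46-63 (so it stays within the 46-bit range the
// kernel is likely to honor). The result is only a hint: mmap() without
// MAP_FIXED is free to pick a different address if that range is unavailable.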
389 
390 // TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
391 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
392 // static
393 void* OS::Allocate(void* hint, size_t size, size_t alignment,
394                    MemoryPermission access) {
395   size_t page_size = AllocatePageSize();
396   DCHECK_EQ(0, size % page_size);
397   DCHECK_EQ(0, alignment % page_size);
398   hint = AlignedAddress(hint, alignment);
399   // Add the maximum misalignment so we are guaranteed an aligned base address.
400   size_t request_size = size + (alignment - page_size);
401   request_size = RoundUp(request_size, OS::AllocatePageSize());
402   void* result = base::Allocate(hint, request_size, access, PageType::kPrivate);
403   if (result == nullptr) return nullptr;
404 
405   // Unmap memory allocated before the aligned base address.
406   uint8_t* base = static_cast<uint8_t*>(result);
407   uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
408       RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
409   if (aligned_base != base) {
410     DCHECK_LT(base, aligned_base);
411     size_t prefix_size = static_cast<size_t>(aligned_base - base);
412     Free(base, prefix_size);
413     request_size -= prefix_size;
414   }
415   // Unmap memory allocated after the potentially unaligned end.
416   if (size != request_size) {
417     DCHECK_LT(size, request_size);
418     size_t suffix_size = request_size - size;
419     Free(aligned_base + size, suffix_size);
420     request_size -= suffix_size;
421   }
422 
423   DCHECK_EQ(size, request_size);
424   return static_cast<void*>(aligned_base);
425 }
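// Worked example for the trimming logic above (illustrative numbers): with
// size = 256 KB, alignment = 64 KB and a 4 KB page size, request_size becomes
// 256 KB + (64 KB - 4 KB) = 316 KB. If mmap() happens to return a base ending
// in ...9000, aligned_base is rounded up to the next 64 KB boundary, the
// prefix below it is unmapped via Free(), and so is the tail beyond
// aligned_base + 256 KB, leaving exactly 256 KB at a 64 KB-aligned address.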
426 
427 // static
428 void* OS::AllocateShared(size_t size, MemoryPermission access) {
429   DCHECK_EQ(0, size % AllocatePageSize());
430   return base::Allocate(nullptr, size, access, PageType::kShared);
431 }
432 
433 // static
434 void OS::Free(void* address, size_t size) {
435   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
436   DCHECK_EQ(0, size % AllocatePageSize());
437   CHECK_EQ(0, munmap(address, size));
438 }
439 
440 // macOS specific implementation in platform-macos.cc.
441 #if !defined(V8_OS_MACOS)
442 // static
443 void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
444                          PlatformSharedMemoryHandle handle, uint64_t offset) {
445   DCHECK_EQ(0, size % AllocatePageSize());
446   int prot = GetProtectionFromMemoryPermission(access);
447   int fd = FileDescriptorFromSharedMemoryHandle(handle);
448   void* result = mmap(hint, size, prot, MAP_SHARED, fd, offset);
449   if (result == MAP_FAILED) return nullptr;
450   return result;
451 }
452 #endif  // !defined(V8_OS_MACOS)
453 
454 // static
455 void OS::FreeShared(void* address, size_t size) {
456   DCHECK_EQ(0, size % AllocatePageSize());
457   CHECK_EQ(0, munmap(address, size));
458 }
459 
460 // static
461 void OS::Release(void* address, size_t size) {
462   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
463   DCHECK_EQ(0, size % CommitPageSize());
464   CHECK_EQ(0, munmap(address, size));
465 }
466 
467 // static
468 bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
469   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
470   DCHECK_EQ(0, size % CommitPageSize());
471 
472   int prot = GetProtectionFromMemoryPermission(access);
473   int ret = mprotect(address, size, prot);
474 
475   // MacOS 11.2 on Apple Silicon refuses to switch permissions from
476   // rwx to none. Just use madvise instead.
477 #if defined(V8_OS_DARWIN)
478   if (ret != 0 && access == OS::MemoryPermission::kNoAccess) {
479     ret = madvise(address, size, MADV_FREE_REUSABLE);
480     return ret == 0;
481   }
482 #endif
483 
484   if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
485     // This is advisory; ignore errors and continue execution.
486     USE(DiscardSystemPages(address, size));
487   }
488 
489 // For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
490 // changing permissions away from OS::MemoryPermission::kNoAccess. Since this
491 // state is not kept at this layer, we always call this if access != kNoAccess.
492 // The cost is a syscall that effectively no-ops.
493 // TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
494 // https://crbug.com/823915
495 #if defined(V8_OS_DARWIN)
496   if (access != OS::MemoryPermission::kNoAccess)
497     madvise(address, size, MADV_FREE_REUSE);
498 #endif
499 
500   return ret == 0;
501 }
502 
503 // static
504 bool OS::DiscardSystemPages(void* address, size_t size) {
505   // Roughly based on PartitionAlloc's DiscardSystemPagesInternal
506   // (base/allocator/partition_allocator/page_allocator_internals_posix.h)
507   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
508   DCHECK_EQ(0, size % CommitPageSize());
509 #if defined(V8_OS_DARWIN)
510   // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
511   // marks the pages with the reusable bit, which allows both Activity Monitor
512   // and memory-infra to correctly track the pages.
513   int ret = madvise(address, size, MADV_FREE_REUSABLE);
514   if (ret) {
515     // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
516     ret = madvise(address, size, MADV_DONTNEED);
517   }
518 #elif defined(_AIX) || defined(V8_OS_SOLARIS)
519   int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
520   if (ret != 0 && errno == ENOSYS)
521     return true;  // madvise is not available on all systems.
522   if (ret != 0 && errno == EINVAL)
523     ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
524 #else
525   int ret = madvise(address, size, MADV_DONTNEED);
526 #endif
527   return ret == 0;
528 }
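// Added note: in every branch above the mapping itself stays in place; the
// madvise() calls only tell the kernel that the contents are disposable. On
// Linux, MADV_DONTNEED means later reads of these anonymous pages observe
// zero-filled memory; on macOS, MADV_FREE_REUSABLE additionally updates the
// "reusable" accounting visible in Activity Monitor, as described above.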
529 
530 #if !defined(_AIX)
531 // See AIX version for details.
532 // static
533 bool OS::DecommitPages(void* address, size_t size) {
534   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
535   DCHECK_EQ(0, size % CommitPageSize());
536   // From https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html:
537   // "If a MAP_FIXED request is successful, then any previous mappings [...] for
538   // those whole pages containing any part of the address range [pa,pa+len)
539   // shall be removed, as if by an appropriate call to munmap(), before the new
540   // mapping is established." As a consequence, the memory will be
541   // zero-initialized on next access.
542   void* ptr = mmap(address, size, PROT_NONE,
543                    MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
544   return ptr == address;
545 }
546 #endif  // !defined(_AIX)
547 
548 // static
549 bool OS::CanReserveAddressSpace() { return true; }
550 
551 // static
552 Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
553     void* hint, size_t size, size_t alignment,
554     MemoryPermission max_permission) {
555   // On POSIX, address space reservations are backed by private memory mappings.
556   MemoryPermission permission = MemoryPermission::kNoAccess;
557   if (max_permission == MemoryPermission::kReadWriteExecute) {
558     permission = MemoryPermission::kNoAccessWillJitLater;
559   }
560 
561   void* reservation = Allocate(hint, size, alignment, permission);
562   if (!reservation && permission == MemoryPermission::kNoAccessWillJitLater) {
563     // Retry without MAP_JIT, for example in case we are running on an old OS X.
564     permission = MemoryPermission::kNoAccess;
565     reservation = Allocate(hint, size, alignment, permission);
566   }
567 
568   if (!reservation) return {};
569 
570   return AddressSpaceReservation(reservation, size);
571 }
572 
573 // static
574 void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
575   Free(reservation.base(), reservation.size());
576 }
577 
578 // macOS specific implementation in platform-macos.cc.
579 #if !defined(V8_OS_MACOS)
580 // static
581 // Need to disable CFI_ICALL due to the indirect call to memfd_create.
582 DISABLE_CFI_ICALL
583 PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
584   return kInvalidSharedMemoryHandle;
585 }
586 
587 // static
588 void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
589   DCHECK_NE(kInvalidSharedMemoryHandle, handle);
590   int fd = FileDescriptorFromSharedMemoryHandle(handle);
591   CHECK_EQ(0, close(fd));
592 }
593 #endif  // !defined(V8_OS_MACOS)
594 
595 // static
596 bool OS::HasLazyCommits() {
597 #if V8_OS_AIX || V8_OS_LINUX || V8_OS_DARWIN
598   return true;
599 #else
600   // TODO(bbudge) Return true for all POSIX platforms.
601   return false;
602 #endif
603 }
604 #endif  // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
605 
606 const char* OS::GetGCFakeMMapFile() {
607   return g_gc_fake_mmap;
608 }
609 
610 
611 void OS::Sleep(TimeDelta interval) {
612   usleep(static_cast<useconds_t>(interval.InMicroseconds()));
613 }
614 
615 
616 void OS::Abort() {
617   if (g_hard_abort) {
618     IMMEDIATE_CRASH();
619   }
620   // Redirect to std abort to signal abnormal program termination.
621   abort();
622 }
623 
624 
625 void OS::DebugBreak() {
626 #if V8_HOST_ARCH_ARM
627   asm("bkpt 0");
628 #elif V8_HOST_ARCH_ARM64
629   asm("brk 0");
630 #elif V8_HOST_ARCH_MIPS
631   asm("break");
632 #elif V8_HOST_ARCH_MIPS64
633   asm("break");
634 #elif V8_HOST_ARCH_LOONG64
635   asm("break 0");
636 #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
637   asm("twge 2,2");
638 #elif V8_HOST_ARCH_IA32
639   asm("int $3");
640 #elif V8_HOST_ARCH_X64
641   asm("int $3");
642 #elif V8_HOST_ARCH_S390
643   // Software breakpoint instruction is 0x0001
644   asm volatile(".word 0x0001");
645 #elif V8_HOST_ARCH_RISCV64
646   asm("ebreak");
647 #else
648 #error Unsupported host architecture.
649 #endif
650 }
651 
652 
653 class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
654  public:
655   PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
656       : file_(file), memory_(memory), size_(size) {}
657   ~PosixMemoryMappedFile() final;
658   void* memory() const final { return memory_; }
659   size_t size() const final { return size_; }
660 
661  private:
662   FILE* const file_;
663   void* const memory_;
664   size_t const size_;
665 };
666 
667 
668 // static
669 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
670                                                  FileMode mode) {
671   const char* fopen_mode = (mode == FileMode::kReadOnly) ? "r" : "r+";
672   struct stat statbuf;
673   // Make sure path exists and is not a directory.
674   if (stat(name, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode)) {
675     if (FILE* file = fopen(name, fopen_mode)) {
676       if (fseek(file, 0, SEEK_END) == 0) {
677         long size = ftell(file);  // NOLINT(runtime/int)
678         if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
679         if (size > 0) {
680           int prot = PROT_READ;
681           int flags = MAP_PRIVATE;
682           if (mode == FileMode::kReadWrite) {
683             prot |= PROT_WRITE;
684             flags = MAP_SHARED;
685           }
686           void* const memory =
687               mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
688           if (memory != MAP_FAILED) {
689             return new PosixMemoryMappedFile(file, memory, size);
690           }
691         }
692       }
693       fclose(file);
694     }
695   }
696   return nullptr;
697 }
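// Usage sketch (added commentary; the file name is purely illustrative):
//   std::unique_ptr<OS::MemoryMappedFile> file(
//       OS::MemoryMappedFile::open("data.bin", FileMode::kReadOnly));
//   if (file && file->memory()) {
//     // file->memory() points at file->size() mapped bytes; deleting the
//     // object unmaps the memory and closes the underlying FILE*.
//   }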
698 
699 // static
700 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
701                                                    size_t size, void* initial) {
702   if (FILE* file = fopen(name, "w+")) {
703     if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
704     size_t result = fwrite(initial, 1, size, file);
705     if (result == size && !ferror(file)) {
706       void* memory = mmap(OS::GetRandomMmapAddr(), result,
707                           PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
708       if (memory != MAP_FAILED) {
709         return new PosixMemoryMappedFile(file, memory, result);
710       }
711     }
712     fclose(file);
713   }
714   return nullptr;
715 }
716 
717 
718 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
719   if (memory_) OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize()));
720   fclose(file_);
721 }
722 
723 
724 int OS::GetCurrentProcessId() {
725   return static_cast<int>(getpid());
726 }
727 
728 
729 int OS::GetCurrentThreadId() {
730 #if V8_OS_DARWIN || (V8_OS_ANDROID && defined(__APPLE__))
731   return static_cast<int>(pthread_mach_thread_np(pthread_self()));
732 #elif V8_OS_LINUX
733   return static_cast<int>(syscall(__NR_gettid));
734 #elif V8_OS_ANDROID
735   return static_cast<int>(gettid());
736 #elif V8_OS_AIX
737   return static_cast<int>(thread_self());
738 #elif V8_OS_FUCHSIA
739   return static_cast<int>(zx_thread_self());
740 #elif V8_OS_SOLARIS
741   return static_cast<int>(pthread_self());
742 #else
743   return static_cast<int>(reinterpret_cast<intptr_t>(pthread_self()));
744 #endif
745 }
746 
747 void OS::ExitProcess(int exit_code) {
748   // Use _exit instead of exit to avoid races between isolate
749   // threads and static destructors.
750   fflush(stdout);
751   fflush(stderr);
752   _exit(exit_code);
753 }
754 
755 // ----------------------------------------------------------------------------
756 // POSIX date/time support.
757 //
758 
759 #if !defined(V8_OS_FUCHSIA)
760 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
761   struct rusage usage;
762 
763   if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
764   *secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
765   *usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
766   return 0;
767 }
768 #endif
769 
770 double OS::TimeCurrentMillis() {
771   return Time::Now().ToJsTime();
772 }
773 
774 double PosixTimezoneCache::DaylightSavingsOffset(double time) {
775   if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
776   time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
777   struct tm tm;
778   struct tm* t = localtime_r(&tv, &tm);
779   if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
780   return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
781 }
782 
783 
784 int OS::GetLastError() {
785   return errno;
786 }
787 
788 
789 // ----------------------------------------------------------------------------
790 // POSIX stdio support.
791 //
792 
793 FILE* OS::FOpen(const char* path, const char* mode) {
794   FILE* file = fopen(path, mode);
795   if (file == nullptr) return nullptr;
796   struct stat file_stat;
797   if (fstat(fileno(file), &file_stat) != 0) {
798     fclose(file);
799     return nullptr;
800   }
801   bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
802   if (is_regular_file) return file;
803   fclose(file);
804   return nullptr;
805 }
806 
807 
808 bool OS::Remove(const char* path) {
809   return (remove(path) == 0);
810 }
811 
812 char OS::DirectorySeparator() { return '/'; }
813 
814 bool OS::isDirectorySeparator(const char ch) {
815   return ch == DirectorySeparator();
816 }
817 
818 
819 FILE* OS::OpenTemporaryFile() {
820   return tmpfile();
821 }
822 
823 const char* const OS::LogFileOpenMode = "w+";
824 
825 void OS::Print(const char* format, ...) {
826   va_list args;
827   va_start(args, format);
828   VPrint(format, args);
829   va_end(args);
830 }
831 
832 
833 void OS::VPrint(const char* format, va_list args) {
834 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
835   __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
836 #else
837   vprintf(format, args);
838 #endif
839 }
840 
841 
842 void OS::FPrint(FILE* out, const char* format, ...) {
843   va_list args;
844   va_start(args, format);
845   VFPrint(out, format, args);
846   va_end(args);
847 }
848 
849 
850 void OS::VFPrint(FILE* out, const char* format, va_list args) {
851 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
852   __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
853 #else
854   vfprintf(out, format, args);
855 #endif
856 }
857 
858 
859 void OS::PrintError(const char* format, ...) {
860   va_list args;
861   va_start(args, format);
862   VPrintError(format, args);
863   va_end(args);
864 }
865 
866 
867 void OS::VPrintError(const char* format, va_list args) {
868 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
869   __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
870 #else
871   vfprintf(stderr, format, args);
872 #endif
873 }
874 
875 
876 int OS::SNPrintF(char* str, int length, const char* format, ...) {
877   va_list args;
878   va_start(args, format);
879   int result = VSNPrintF(str, length, format, args);
880   va_end(args);
881   return result;
882 }
883 
884 
885 int OS::VSNPrintF(char* str,
886                   int length,
887                   const char* format,
888                   va_list args) {
889   int n = vsnprintf(str, length, format, args);
890   if (n < 0 || n >= length) {
891     // If the length is zero, there is no room for a terminating '\0'.
892     if (length > 0)
893       str[length - 1] = '\0';
894     return -1;
895   } else {
896     return n;
897   }
898 }
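// Example of the truncation contract above (added commentary):
// SNPrintF(buf, 4, "%d", 12345) ends up here, writes "123" plus the
// terminating NUL into buf, and returns -1 to signal that the output did
// not fit.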
899 
900 
901 // ----------------------------------------------------------------------------
902 // POSIX string support.
903 //
904 
905 void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
906   strncpy(dest, src, n);
907 }
908 
909 // ----------------------------------------------------------------------------
910 // POSIX Address space reservation support.
911 //
912 
913 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
914 
915 Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
916     void* address, size_t size, OS::MemoryPermission max_permission) {
917   DCHECK(Contains(address, size));
918   DCHECK_EQ(0, size % OS::AllocatePageSize());
919   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());
920 
921   return AddressSpaceReservation(address, size);
922 }
923 
924 bool AddressSpaceReservation::FreeSubReservation(
925     AddressSpaceReservation reservation) {
926   // Nothing to do.
927   // Pages allocated inside the reservation must've already been freed.
928   return true;
929 }
930 
931 bool AddressSpaceReservation::Allocate(void* address, size_t size,
932                                        OS::MemoryPermission access) {
933   // The region is already mmap'ed, so it just has to be made accessible now.
934   DCHECK(Contains(address, size));
935   if (access == OS::MemoryPermission::kNoAccess) {
936     // Nothing to do. We don't want to call SetPermissions with kNoAccess here
937     // as that will for example mark the pages as discardable, which is
938     // probably not desired here.
939     return true;
940   }
941   return OS::SetPermissions(address, size, access);
942 }
943 
944 bool AddressSpaceReservation::Free(void* address, size_t size) {
945   DCHECK(Contains(address, size));
946   return OS::DecommitPages(address, size);
947 }
948 
949 // macOS specific implementation in platform-macos.cc.
950 #if !defined(V8_OS_MACOS)
951 bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
952                                              OS::MemoryPermission access,
953                                              PlatformSharedMemoryHandle handle,
954                                              uint64_t offset) {
955   DCHECK(Contains(address, size));
956   int prot = GetProtectionFromMemoryPermission(access);
957   int fd = FileDescriptorFromSharedMemoryHandle(handle);
958   return mmap(address, size, prot, MAP_SHARED | MAP_FIXED, fd, offset) !=
959          MAP_FAILED;
960 }
961 #endif  // !defined(V8_OS_MACOS)
962 
963 bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
964   DCHECK(Contains(address, size));
965   return mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
966               -1, 0) == address;
967 }
968 
969 bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
970                                              OS::MemoryPermission access) {
971   DCHECK(Contains(address, size));
972   return OS::SetPermissions(address, size, access);
973 }
974 
975 bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
976   DCHECK(Contains(address, size));
977   return OS::DiscardSystemPages(address, size);
978 }
979 
980 bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
981   DCHECK(Contains(address, size));
982   return OS::DecommitPages(address, size);
983 }
984 
985 #endif  // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
986 
987 // ----------------------------------------------------------------------------
988 // POSIX thread support.
989 //
990 
991 class Thread::PlatformData {
992  public:
993   PlatformData() : thread_(kNoThread) {}
994   pthread_t thread_;  // Thread handle for pthread.
995   // Synchronizes thread creation
996   Mutex thread_creation_mutex_;
997 };
998 
999 Thread::Thread(const Options& options)
1000     : data_(new PlatformData),
1001       stack_size_(options.stack_size()),
1002       start_semaphore_(nullptr) {
1003   const int min_stack_size = static_cast<int>(PTHREAD_STACK_MIN);
1004   if (stack_size_ > 0) stack_size_ = std::max(stack_size_, min_stack_size);
1005   set_name(options.name());
1006 }
1007 
1008 
1009 Thread::~Thread() {
1010   delete data_;
1011 }
1012 
1013 
1014 static void SetThreadName(const char* name) {
1015 #if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
1016   pthread_set_name_np(pthread_self(), name);
1017 #elif V8_OS_NETBSD
1018   STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
1019   pthread_setname_np(pthread_self(), "%s", name);
1020 #elif V8_OS_DARWIN
1021   // pthread_setname_np is only available in 10.6 or later, so test
1022   // for it at runtime.
1023   int (*dynamic_pthread_setname_np)(const char*);
1024   *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
1025     dlsym(RTLD_DEFAULT, "pthread_setname_np");
1026   if (dynamic_pthread_setname_np == nullptr) return;
1027 
1028   // Mac OS X does not expose the length limit of the name, so hardcode it.
1029   static const int kMaxNameLength = 63;
1030   STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
1031   dynamic_pthread_setname_np(name);
1032 #elif defined(PR_SET_NAME)
1033   prctl(PR_SET_NAME,
1034         reinterpret_cast<unsigned long>(name),  // NOLINT
1035         0, 0, 0);
1036 #endif
1037 }
1038 
1039 
1040 static void* ThreadEntry(void* arg) {
1041   Thread* thread = reinterpret_cast<Thread*>(arg);
1042   // We take the lock here to make sure that pthread_create finished first since
1043   // we don't know which thread will run first (the original thread or the new
1044   // one).
1045   { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
1046   SetThreadName(thread->name());
1047   DCHECK_NE(thread->data()->thread_, kNoThread);
1048   thread->NotifyStartedAndRun();
1049   return nullptr;
1050 }
1051 
1052 
1053 void Thread::set_name(const char* name) {
1054   strncpy(name_, name, sizeof(name_) - 1);
1055   name_[sizeof(name_) - 1] = '\0';
1056 }
1057 
1058 bool Thread::Start() {
1059   int result;
1060   pthread_attr_t attr;
1061   memset(&attr, 0, sizeof(attr));
1062   result = pthread_attr_init(&attr);
1063   if (result != 0) return false;
1064   size_t stack_size = stack_size_;
1065   if (stack_size == 0) {
1066 #if V8_OS_DARWIN
1067     // Default on Mac OS X is 512kB -- bump up to 1MB
1068     stack_size = 1 * 1024 * 1024;
1069 #elif V8_OS_AIX
1070     // Default on AIX is 96kB -- bump up to 2MB
1071     stack_size = 2 * 1024 * 1024;
1072 #endif
1073   }
1074   if (stack_size > 0) {
1075     result = pthread_attr_setstacksize(&attr, stack_size);
1076     if (result != 0) return pthread_attr_destroy(&attr), false;
1077   }
1078   {
1079     MutexGuard lock_guard(&data_->thread_creation_mutex_);
1080     result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
1081     if (result != 0 || data_->thread_ == kNoThread) {
1082       return pthread_attr_destroy(&attr), false;
1083     }
1084   }
1085   result = pthread_attr_destroy(&attr);
1086   return result == 0;
1087 }
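// Usage sketch (added commentary, assuming the usual v8::base::Thread
// interface where a subclass overrides Run(); the class name is made up):
//   class Worker : public Thread {
//    public:
//     Worker() : Thread(Options("worker")) {}
//     void Run() override { /* runs on the newly created pthread */ }
//   };
//   Worker worker;
//   if (worker.Start()) worker.Join();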
1088 
1089 void Thread::Join() { pthread_join(data_->thread_, nullptr); }
1090 
1091 static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
1092 #if V8_OS_CYGWIN
1093   // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
1094   // because pthread_key_t is a pointer type on Cygwin. This will probably not
1095   // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
1096   STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
1097   intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
1098   return static_cast<Thread::LocalStorageKey>(ptr_key);
1099 #else
1100   return static_cast<Thread::LocalStorageKey>(pthread_key);
1101 #endif
1102 }
1103 
1104 
1105 static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
1106 #if V8_OS_CYGWIN
1107   STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
1108   intptr_t ptr_key = static_cast<intptr_t>(local_key);
1109   return reinterpret_cast<pthread_key_t>(ptr_key);
1110 #else
1111   return static_cast<pthread_key_t>(local_key);
1112 #endif
1113 }
1114 
1115 
1116 #ifdef V8_FAST_TLS_SUPPORTED
1117 
1118 static std::atomic<bool> tls_base_offset_initialized{false};
1119 intptr_t kMacTlsBaseOffset = 0;
1120 
1121 // It's safe to do the initialization more than once, but it has to be
1122 // done at least once.
1123 static void InitializeTlsBaseOffset() {
1124   const size_t kBufferSize = 128;
1125   char buffer[kBufferSize];
1126   size_t buffer_size = kBufferSize;
1127   int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
1128   if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
1129     FATAL("V8 failed to get kernel version");
1130   }
1131   // The buffer now contains a string of the form XX.YY.ZZ, where
1132   // XX is the major kernel version component.
1133   // Make sure the buffer is 0-terminated.
1134   buffer[kBufferSize - 1] = '\0';
1135   char* period_pos = strchr(buffer, '.');
1136   *period_pos = '\0';
1137   int kernel_version_major = static_cast<int>(strtol(buffer, nullptr, 10));
1138   // The constants below are taken from pthreads.s from the XNU kernel
1139   // sources archive at www.opensource.apple.com.
1140   if (kernel_version_major < 11) {
1141     // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
1142     // same offsets.
1143 #if V8_HOST_ARCH_IA32
1144     kMacTlsBaseOffset = 0x48;
1145 #else
1146     kMacTlsBaseOffset = 0x60;
1147 #endif
1148   } else {
1149     // 11.x.x (Lion) changed the offset.
1150     kMacTlsBaseOffset = 0;
1151   }
1152 
1153   tls_base_offset_initialized.store(true, std::memory_order_release);
1154 }
1155 
1156 
1157 static void CheckFastTls(Thread::LocalStorageKey key) {
1158   void* expected = reinterpret_cast<void*>(0x1234CAFE);
1159   Thread::SetThreadLocal(key, expected);
1160   void* actual = Thread::GetExistingThreadLocal(key);
1161   if (expected != actual) {
1162     FATAL("V8 failed to initialize fast TLS on current kernel");
1163   }
1164   Thread::SetThreadLocal(key, nullptr);
1165 }
1166 
1167 #endif  // V8_FAST_TLS_SUPPORTED
1168 
1169 
1170 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
1171 #ifdef V8_FAST_TLS_SUPPORTED
1172   bool check_fast_tls = false;
1173   if (!tls_base_offset_initialized.load(std::memory_order_acquire)) {
1174     check_fast_tls = true;
1175     InitializeTlsBaseOffset();
1176   }
1177 #endif
1178   pthread_key_t key;
1179   int result = pthread_key_create(&key, nullptr);
1180   DCHECK_EQ(0, result);
1181   USE(result);
1182   LocalStorageKey local_key = PthreadKeyToLocalKey(key);
1183 #ifdef V8_FAST_TLS_SUPPORTED
1184   // If we just initialized fast TLS support, make sure it works.
1185   if (check_fast_tls) CheckFastTls(local_key);
1186 #endif
1187   return local_key;
1188 }
1189 
1190 
1191 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
1192   pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
1193   int result = pthread_key_delete(pthread_key);
1194   DCHECK_EQ(0, result);
1195   USE(result);
1196 }
1197 
1198 
1199 void* Thread::GetThreadLocal(LocalStorageKey key) {
1200   pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
1201   return pthread_getspecific(pthread_key);
1202 }
1203 
1204 
1205 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
1206   pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
1207   int result = pthread_setspecific(pthread_key, value);
1208   DCHECK_EQ(0, result);
1209   USE(result);
1210 }
1211 
1212 // pthread_getattr_np used below is non-portable (hence the _np suffix). We
1213 // keep this version in POSIX as most Linux-compatible derivatives will
1214 // support it. MacOS and FreeBSD are different here.
1215 #if !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && !defined(_AIX) && \
1216     !defined(V8_OS_SOLARIS)
1217 
1218 // static
1219 Stack::StackSlot Stack::GetStackStart() {
1220   pthread_attr_t attr;
1221   int error = pthread_getattr_np(pthread_self(), &attr);
1222   if (!error) {
1223     void* base;
1224     size_t size;
1225     error = pthread_attr_getstack(&attr, &base, &size);
1226     CHECK(!error);
1227     pthread_attr_destroy(&attr);
1228     return reinterpret_cast<uint8_t*>(base) + size;
1229   }
1230 
1231 #if defined(V8_LIBC_GLIBC)
1232   // pthread_getattr_np can fail for the main thread. In this case
1233   // just like NaCl we rely on the __libc_stack_end to give us
1234   // the start of the stack.
1235   // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
1236   return __libc_stack_end;
1237 #else
1238   return nullptr;
1239 #endif  // !defined(V8_LIBC_GLIBC)
1240 }
1241 
1242 #endif  // !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) &&
1243         // !defined(_AIX) && !defined(V8_OS_SOLARIS)
1244 
1245 // static
1246 Stack::StackSlot Stack::GetCurrentStackPosition() {
1247   return __builtin_frame_address(0);
1248 }
1249 
1250 #undef LOG_TAG
1251 #undef MAP_ANONYMOUS
1252 #undef MADV_FREE
1253 
1254 }  // namespace base
1255 }  // namespace v8
1256