// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Platform-specific code for MacOS goes here. For the POSIX-compatible
// parts, the implementation is in platform-posix.cc.

#include <dlfcn.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <sys/mman.h>
#include <unistd.h>

#include <AvailabilityMacros.h>

#include <errno.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/types.h>

#include <cmath>

#undef MAP_TYPE

#include "src/base/macros.h"
#include "src/base/platform/platform.h"


namespace v8 {
namespace base {


// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the
// user-defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
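// (For an anonymous mapping, Darwin's mmap() accepts a VM_MAKE_TAG() value in
// place of a file descriptor, which is why the tag is passed below as the fd
// argument rather than as a mapping flag.)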
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;


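// Rounds |requested| up to a whole number of pages and maps an anonymous,
// read/write (and optionally executable) region at a randomized hint address.
// Returns NULL on failure; on success, *allocated receives the rounded size.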
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(OS::GetRandomMmapAddr(),
                     msize,
                     prot,
                     MAP_PRIVATE | MAP_ANON,
                     kMmapFd,
                     kMmapFdOffset);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


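// Maps an existing file read/write with MAP_SHARED, so stores through the
// returned mapping are reflected in the underlying file.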
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  return new PosixMemoryMappedFile(file, memory, size);
}


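// Creates (or truncates) |name|, writes |size| bytes of |initial| data to it,
// and maps the resulting file read/write and shared.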
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(OS::GetRandomMmapAddr(),
          size,
          PROT_READ | PROT_WRITE,
          MAP_SHARED,
          fileno(file),
          0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}


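// Walks the images currently loaded by dyld and records the [start, end)
// address range of each image's __TEXT,__text section.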
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
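    // getsectdatafromheader() reports the section's link-time address, so the
    // image's ASLR slide has to be added to obtain the address actually in
    // use at runtime.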
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    result.push_back(
        SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
  }
  return result;
}


void OS::SignalCodeMovingGC() {
}


const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset(TimezoneCache* cache) {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight saving time offset, so subtract it.
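  // For example, in a zone at UTC-8 that is currently observing DST,
  // tm_gmtoff is -25200 seconds (UTC-7); subtracting the 3600-second DST
  // correction yields the standard-time offset of -28800000 ms.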
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


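// Reserves a |size|-byte region whose base address is aligned to |alignment|
// by over-reserving |size + alignment| bytes and then trimming the unused
// prefix and suffix back to the OS.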
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
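  // For example, with size = 64 KB and alignment = 1 MB, 1 MB + 64 KB is
  // reserved; if the reservation comes back at 0x10000c000, aligned_base is
  // 0x100100000, so the 0xf4000-byte prefix below it and the tail beyond
  // aligned_base + 64 KB are both returned to the OS.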
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


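// The region helpers below implement reserve/commit on top of mmap:
// reservation maps the range PROT_NONE with MAP_NORESERVE so no backing store
// is charged up front, committing remaps it in place (MAP_FIXED) with real
// protections, and uncommitting returns it to the PROT_NONE reserved state.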
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::CommitRegion(void* address,
                                 size_t size,
                                 bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}


bool VirtualMemory::UncommitRegion(void* address, size_t size) {
  return mmap(address,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}


bool VirtualMemory::HasLazyCommits() {
  return false;
}

} }  // namespace v8::base