1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // Platform-specific code for Linux goes here. For the POSIX-compatible
6 // parts, the implementation is in platform-posix.cc.
7
8 #include <pthread.h>
9 #include <semaphore.h>
10 #include <signal.h>
11 #include <sys/prctl.h>
12 #include <sys/time.h>
13 #include <sys/resource.h>
14 #include <sys/syscall.h>
15 #include <sys/types.h>
16 #include <stdlib.h>
17
18 // Ubuntu Dapper requires memory pages to be marked as
19 // executable. Otherwise, OS raises an exception when executing code
20 // in that page.
21 #include <sys/types.h> // mmap & munmap
22 #include <sys/mman.h> // mmap & munmap
23 #include <sys/stat.h> // open
24 #include <fcntl.h> // open
25 #include <unistd.h> // sysconf
26 #include <strings.h> // index
27 #include <errno.h>
28 #include <stdarg.h>
29
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
32 #if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
33 (defined(__arm__) || defined(__aarch64__)) && \
34 !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
35 #include <asm/sigcontext.h>
36 #endif
37
38 #if defined(LEAK_SANITIZER)
39 #include <sanitizer/lsan_interface.h>
40 #endif
41
42 #undef MAP_TYPE
43
44 #include "src/v8.h"
45
46 #include "src/platform.h"
47
48
49 namespace v8 {
50 namespace internal {
51
52
#ifdef __arm__

// Returns true if this binary was compiled for the ARM hard-float (VFP)
// calling convention. Decided entirely at compile time from compiler-
// provided macros; the runtime body is a single constant return.
bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // what FP ABI used.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION (__GNUC__ * 10000 \
                     + __GNUC_MINOR__ * 100 \
                     + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
    !defined(__VFP_FP__)
  return false;
#else
// Fixed message: the original concatenated fragments without separating
// spaces, producing "...for.Please report it on this issuehttp://...".
#error "Your version of GCC does not report the FP ABI compiled for." \
       " Please report it on this issue:" \
       " http://code.google.com/p/v8/issues/detail?id=2140"

#endif
#endif
#undef GCC_VERSION
}

#endif  // def __arm__
94
95
LocalTimezone(double time,TimezoneCache * cache)96 const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
97 if (std::isnan(time)) return "";
98 time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
99 struct tm* t = localtime(&tv);
100 if (NULL == t) return "";
101 return t->tm_zone;
102 }
103
104
LocalTimeOffset(TimezoneCache * cache)105 double OS::LocalTimeOffset(TimezoneCache* cache) {
106 time_t tv = time(NULL);
107 struct tm* t = localtime(&tv);
108 // tm_gmtoff includes any daylight savings offset, so subtract it.
109 return static_cast<double>(t->tm_gmtoff * msPerSecond -
110 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
111 }
112
113
Allocate(const size_t requested,size_t * allocated,bool is_executable)114 void* OS::Allocate(const size_t requested,
115 size_t* allocated,
116 bool is_executable) {
117 const size_t msize = RoundUp(requested, AllocateAlignment());
118 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
119 void* addr = OS::GetRandomMmapAddr();
120 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
121 if (mbase == MAP_FAILED) return NULL;
122 *allocated = msize;
123 return mbase;
124 }
125
126
// Memory-mapped file implementation backed by mmap(). Owns both the stdio
// stream and the mapped region; the destructor (defined after the factory
// functions below) unmaps the memory and closes the file.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Underlying stream; closed by the destructor.
  void* memory_;  // Base of the mapped region; freed if non-NULL.
  int size_;      // Length of the mapping in bytes.
};
139
140
open(const char * name)141 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
142 FILE* file = fopen(name, "r+");
143 if (file == NULL) return NULL;
144
145 fseek(file, 0, SEEK_END);
146 int size = ftell(file);
147
148 void* memory =
149 mmap(OS::GetRandomMmapAddr(),
150 size,
151 PROT_READ | PROT_WRITE,
152 MAP_SHARED,
153 fileno(file),
154 0);
155 return new PosixMemoryMappedFile(file, memory, size);
156 }
157
158
create(const char * name,int size,void * initial)159 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
160 void* initial) {
161 FILE* file = fopen(name, "w+");
162 if (file == NULL) return NULL;
163 int result = fwrite(initial, size, 1, file);
164 if (result < 1) {
165 fclose(file);
166 return NULL;
167 }
168 void* memory =
169 mmap(OS::GetRandomMmapAddr(),
170 size,
171 PROT_READ | PROT_WRITE,
172 MAP_SHARED,
173 fileno(file),
174 0);
175 return new PosixMemoryMappedFile(file, memory, size);
176 }
177
178
~PosixMemoryMappedFile()179 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
180 if (memory_) OS::Free(memory_, size_);
181 fclose(file_);
182 }
183
184
// Scans /proc/self/maps and returns every read-only executable mapping
// (i.e. mapped code), labeled with the backing file name when present or
// with the raw "start-end" range otherwise. Returns an empty vector if the
// maps file cannot be opened; stops early on any parse error.
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return result;

  // Allocate enough room to be able to store a full file name.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  // This loop will terminate once the scanning hits an EOF.
  // NOTE: the fscanf/getc/ungetc/fgets sequence below consumes the stream
  // incrementally; the relative order of these calls is load-bearing.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if ((c == '/') || (c == '[')) {
        // Push the '/' or '[' back into the stream to be read below.
        ungetc(c, fp);

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' or '[' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      result.push_back(SharedLibraryAddress(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
  return result;
}
245
246
SignalCodeMovingGC()247 void OS::SignalCodeMovingGC() {
248 // Support for ll_prof.py.
249 //
250 // The Linux profiler built into the kernel logs all mmap's with
251 // PROT_EXEC so that analysis tools can properly attribute ticks. We
252 // do a mmap with a name known by ll_prof.py and immediately munmap
253 // it. This injects a GC marker into the stream of events generated
254 // by the kernel and allows us to synchronize V8 code log and the
255 // kernel log.
256 int size = sysconf(_SC_PAGESIZE);
257 FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
258 if (f == NULL) {
259 OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
260 OS::Abort();
261 }
262 void* addr = mmap(OS::GetRandomMmapAddr(),
263 size,
264 #if defined(__native_client__)
265 // The Native Client port of V8 uses an interpreter,
266 // so code pages don't need PROT_EXEC.
267 PROT_READ,
268 #else
269 PROT_READ | PROT_EXEC,
270 #endif
271 MAP_PRIVATE,
272 fileno(f),
273 0);
274 ASSERT(addr != MAP_FAILED);
275 OS::Free(addr, size);
276 fclose(f);
277 }
278
279
// Constants used for mmap.
// kMmapFd is the fd argument for anonymous mappings (ignored with
// MAP_ANONYMOUS; -1 by convention) and kMmapFdOffset the matching offset.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
283
284
VirtualMemory()285 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
286
287
// Reserves (without committing) |size| bytes of address space. On failure
// address_ is NULL, which is what IsReserved() checks; size_ is set
// unconditionally.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }
290
291
// Reserves |size| bytes of address space aligned to |alignment| by
// over-reserving size + alignment bytes and then unmapping the unaligned
// prefix and the surplus suffix. Leaves address_ == NULL (not reserved)
// if the initial reservation fails.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: claim address space only; pages become
  // accessible later through Commit().
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  // Trim the tail so exactly aligned_size bytes remain mapped.
  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
#if defined(LEAK_SANITIZER)
  __lsan_register_root_region(address_, size_);
#endif
}
333
334
~VirtualMemory()335 VirtualMemory::~VirtualMemory() {
336 if (IsReserved()) {
337 bool result = ReleaseRegion(address(), size());
338 ASSERT(result);
339 USE(result);
340 }
341 }
342
343
// True if this object currently holds a reservation (address_ is only
// non-NULL after a successful mmap).
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}
347
348
// Detaches this object from its reservation without unmapping it; the
// caller assumes ownership of the region.
// NOTE(review): under LEAK_SANITIZER the region stays registered as a root
// region — presumably the new owner unregisters it; confirm at call sites.
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
353
354
// Commits a sub-range of the reservation as read/write (plus execute when
// requested). Returns false if the underlying mmap fails.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
358
359
// Returns a committed sub-range to the reserved (inaccessible) state.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
363
364
// Installs an inaccessible guard page at |address|. Always reports success
// because OS::Guard provides no status to propagate.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
369
370
ReserveRegion(size_t size)371 void* VirtualMemory::ReserveRegion(size_t size) {
372 void* result = mmap(OS::GetRandomMmapAddr(),
373 size,
374 PROT_NONE,
375 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
376 kMmapFd,
377 kMmapFdOffset);
378
379 if (result == MAP_FAILED) return NULL;
380
381 #if defined(LEAK_SANITIZER)
382 __lsan_register_root_region(result, size);
383 #endif
384 return result;
385 }
386
387
CommitRegion(void * base,size_t size,bool is_executable)388 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
389 #if defined(__native_client__)
390 // The Native Client port of V8 uses an interpreter,
391 // so code pages don't need PROT_EXEC.
392 int prot = PROT_READ | PROT_WRITE;
393 #else
394 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
395 #endif
396 if (MAP_FAILED == mmap(base,
397 size,
398 prot,
399 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
400 kMmapFd,
401 kMmapFdOffset)) {
402 return false;
403 }
404
405 return true;
406 }
407
408
UncommitRegion(void * base,size_t size)409 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
410 return mmap(base,
411 size,
412 PROT_NONE,
413 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
414 kMmapFd,
415 kMmapFdOffset) != MAP_FAILED;
416 }
417
418
ReleaseRegion(void * base,size_t size)419 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
420 #if defined(LEAK_SANITIZER)
421 __lsan_unregister_root_region(base, size);
422 #endif
423 return munmap(base, size) == 0;
424 }
425
426
// Reports that committed pages on this platform are not necessarily backed
// by physical memory until first touched.
bool VirtualMemory::HasLazyCommits() {
  return true;
}
430
431 } } // namespace v8::internal
432