1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <fuchsia/kernel/cpp/fidl.h>
6 #include <lib/fdio/directory.h>
7 #include <lib/zx/resource.h>
8 #include <lib/zx/thread.h>
9 #include <lib/zx/vmar.h>
10 #include <lib/zx/vmo.h>
11
12 #include "src/base/bits.h"
13 #include "src/base/macros.h"
14 #include "src/base/platform/platform-posix-time.h"
15 #include "src/base/platform/platform-posix.h"
16 #include "src/base/platform/platform.h"
17
18 namespace v8 {
19 namespace base {
20
21 namespace {
22
23 static zx_handle_t g_vmex_resource = ZX_HANDLE_INVALID;
24
25 static void* g_root_vmar_base = nullptr;
26
27 #ifdef V8_USE_VMEX_RESOURCE
SetVmexResource()28 void SetVmexResource() {
29 DCHECK_EQ(g_vmex_resource, ZX_HANDLE_INVALID);
30 zx::resource vmex_resource;
31 fuchsia::kernel::VmexResourceSyncPtr vmex_resource_svc;
32 zx_status_t status = fdio_service_connect(
33 "/svc/fuchsia.kernel.VmexResource",
34 vmex_resource_svc.NewRequest().TakeChannel().release());
35 DCHECK_EQ(status, ZX_OK);
36 status = vmex_resource_svc->Get(&vmex_resource);
37 USE(status);
38 DCHECK_EQ(status, ZX_OK);
39 DCHECK(vmex_resource.is_valid());
40 g_vmex_resource = vmex_resource.release();
41 }
42 #endif
43
GetProtectionFromMemoryPermission(OS::MemoryPermission access)44 zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
45 switch (access) {
46 case OS::MemoryPermission::kNoAccess:
47 case OS::MemoryPermission::kNoAccessWillJitLater:
48 return 0; // no permissions
49 case OS::MemoryPermission::kRead:
50 return ZX_VM_PERM_READ;
51 case OS::MemoryPermission::kReadWrite:
52 return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
53 case OS::MemoryPermission::kReadWriteExecute:
54 return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
55 case OS::MemoryPermission::kReadExecute:
56 return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
57 }
58 UNREACHABLE();
59 }
60
61 // Determine ZX_VM_ALIGN_X constant corresponding to the specified alignment.
62 // Returns 0 if there is none.
GetAlignmentOptionFromAlignment(size_t alignment)63 zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) {
64 // The alignment must be one of the ZX_VM_ALIGN_X constants.
65 // See zircon/system/public/zircon/types.h.
66 static_assert(
67 ZX_VM_ALIGN_1KB == (10 << ZX_VM_ALIGN_BASE),
68 "Fuchsia's ZX_VM_ALIGN_1KB constant doesn't match expected value");
69 static_assert(
70 ZX_VM_ALIGN_4GB == (32 << ZX_VM_ALIGN_BASE),
71 "Fuchsia's ZX_VM_ALIGN_4GB constant doesn't match expected value");
72 zx_vm_option_t alignment_log2 = 0;
73 for (int shift = 10; shift <= 32; shift++) {
74 if (alignment == (size_t{1} << shift)) {
75 alignment_log2 = shift;
76 break;
77 }
78 }
79 return alignment_log2 << ZX_VM_ALIGN_BASE;
80 }
81
// How strictly a caller-requested address must be honored when mapping or
// reserving address space.
enum class PlacementMode {
  // Prefer the provided address, but fall back to any free location.
  kUseHint,
  // Let the kernel choose any free location.
  kAnywhere,
  // Map exactly at the provided address, or fail.
  kFixed
};
90
MapVmo(const zx::vmar & vmar,void * vmar_base,size_t page_size,void * address,const zx::vmo & vmo,uint64_t offset,PlacementMode placement,size_t size,size_t alignment,OS::MemoryPermission access)91 void* MapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
92 void* address, const zx::vmo& vmo, uint64_t offset,
93 PlacementMode placement, size_t size, size_t alignment,
94 OS::MemoryPermission access) {
95 DCHECK_EQ(0, size % page_size);
96 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
97 DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);
98
99 zx_vm_option_t options = GetProtectionFromMemoryPermission(access);
100
101 zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
102 CHECK_NE(0, alignment_option); // Invalid alignment specified
103 options |= alignment_option;
104
105 size_t vmar_offset = 0;
106 if (placement != PlacementMode::kAnywhere) {
107 // Try placing the mapping at the specified address.
108 uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
109 uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
110 DCHECK_GE(target_addr, base);
111 vmar_offset = target_addr - base;
112 options |= ZX_VM_SPECIFIC;
113 }
114
115 zx_vaddr_t result;
116 zx_status_t status = vmar.map(options, vmar_offset, vmo, 0, size, &result);
117
118 if (status != ZX_OK && placement == PlacementMode::kUseHint) {
119 // If a placement hint was specified but couldn't be used (for example,
120 // because the offset overlapped another mapping), then retry again without
121 // a vmar_offset to let the kernel pick another location.
122 options &= ~(ZX_VM_SPECIFIC);
123 status = vmar.map(options, 0, vmo, 0, size, &result);
124 }
125
126 if (status != ZX_OK) {
127 return nullptr;
128 }
129
130 return reinterpret_cast<void*>(result);
131 }
132
CreateAndMapVmo(const zx::vmar & vmar,void * vmar_base,size_t page_size,void * address,PlacementMode placement,size_t size,size_t alignment,OS::MemoryPermission access)133 void* CreateAndMapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
134 void* address, PlacementMode placement, size_t size,
135 size_t alignment, OS::MemoryPermission access) {
136 zx::vmo vmo;
137 if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
138 return nullptr;
139 }
140 static const char kVirtualMemoryName[] = "v8-virtualmem";
141 vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName,
142 strlen(kVirtualMemoryName));
143
144 // Always call zx_vmo_replace_as_executable() in case the memory will need
145 // to be marked as executable in the future.
146 // TOOD(https://crbug.com/v8/8899): Only call this when we know that the
147 // region will need to be marked as executable in the future.
148 zx::unowned_resource vmex(g_vmex_resource);
149 if (vmo.replace_as_executable(*vmex, &vmo) != ZX_OK) {
150 return nullptr;
151 }
152
153 return MapVmo(vmar, vmar_base, page_size, address, vmo, 0, placement, size,
154 alignment, access);
155 }
156
UnmapVmo(const zx::vmar & vmar,size_t page_size,void * address,size_t size)157 bool UnmapVmo(const zx::vmar& vmar, size_t page_size, void* address,
158 size_t size) {
159 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
160 DCHECK_EQ(0, size % page_size);
161 return vmar.unmap(reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
162 }
163
SetPermissionsInternal(const zx::vmar & vmar,size_t page_size,void * address,size_t size,OS::MemoryPermission access)164 bool SetPermissionsInternal(const zx::vmar& vmar, size_t page_size,
165 void* address, size_t size,
166 OS::MemoryPermission access) {
167 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
168 DCHECK_EQ(0, size % page_size);
169 uint32_t prot = GetProtectionFromMemoryPermission(access);
170 return vmar.protect(prot, reinterpret_cast<uintptr_t>(address), size) ==
171 ZX_OK;
172 }
173
DiscardSystemPagesInternal(const zx::vmar & vmar,size_t page_size,void * address,size_t size)174 bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size,
175 void* address, size_t size) {
176 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
177 DCHECK_EQ(0, size % page_size);
178 uint64_t address_int = reinterpret_cast<uint64_t>(address);
179 return vmar.op_range(ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0) ==
180 ZX_OK;
181 }
182
CreateAddressSpaceReservationInternal(const zx::vmar & vmar,void * vmar_base,size_t page_size,void * address,PlacementMode placement,size_t size,size_t alignment,OS::MemoryPermission max_permission,zx::vmar * child,zx_vaddr_t * child_addr)183 zx_status_t CreateAddressSpaceReservationInternal(
184 const zx::vmar& vmar, void* vmar_base, size_t page_size, void* address,
185 PlacementMode placement, size_t size, size_t alignment,
186 OS::MemoryPermission max_permission, zx::vmar* child,
187 zx_vaddr_t* child_addr) {
188 DCHECK_EQ(0, size % page_size);
189 DCHECK_EQ(0, alignment % page_size);
190 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % alignment);
191 DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);
192
193 // TODO(v8) determine these based on max_permission.
194 zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
195 ZX_VM_CAN_MAP_EXECUTE | ZX_VM_CAN_MAP_SPECIFIC;
196
197 zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
198 CHECK_NE(0, alignment_option); // Invalid alignment specified
199 options |= alignment_option;
200
201 size_t vmar_offset = 0;
202 if (placement != PlacementMode::kAnywhere) {
203 // Try placing the mapping at the specified address.
204 uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
205 uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
206 DCHECK_GE(target_addr, base);
207 vmar_offset = target_addr - base;
208 options |= ZX_VM_SPECIFIC;
209 }
210
211 zx_status_t status =
212 vmar.allocate(options, vmar_offset, size, child, child_addr);
213 if (status != ZX_OK && placement == PlacementMode::kUseHint) {
214 // If a placement hint was specified but couldn't be used (for example,
215 // because the offset overlapped another mapping), then retry again without
216 // a vmar_offset to let the kernel pick another location.
217 options &= ~(ZX_VM_SPECIFIC);
218 status = vmar.allocate(options, 0, size, child, child_addr);
219 }
220
221 return status;
222 }
223
224 } // namespace
225
CreateTimezoneCache()226 TimezoneCache* OS::CreateTimezoneCache() {
227 return new PosixDefaultTimezoneCache();
228 }
229
230 // static
Initialize(bool hard_abort,const char * const gc_fake_mmap)231 void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
232 PosixInitializeCommon(hard_abort, gc_fake_mmap);
233
234 // Determine base address of root VMAR.
235 zx_info_vmar_t info;
236 zx_status_t status = zx::vmar::root_self()->get_info(
237 ZX_INFO_VMAR, &info, sizeof(info), nullptr, nullptr);
238 CHECK_EQ(ZX_OK, status);
239 g_root_vmar_base = reinterpret_cast<void*>(info.base);
240
241 #ifdef V8_USE_VMEX_RESOURCE
242 SetVmexResource();
243 #endif
244 }
245
246 // static
Allocate(void * address,size_t size,size_t alignment,MemoryPermission access)247 void* OS::Allocate(void* address, size_t size, size_t alignment,
248 MemoryPermission access) {
249 PlacementMode placement =
250 address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
251 return CreateAndMapVmo(*zx::vmar::root_self(), g_root_vmar_base,
252 AllocatePageSize(), address, placement, size,
253 alignment, access);
254 }
255
256 // static
Free(void * address,size_t size)257 void OS::Free(void* address, size_t size) {
258 CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
259 }
260
261 // static
AllocateShared(void * address,size_t size,OS::MemoryPermission access,PlatformSharedMemoryHandle handle,uint64_t offset)262 void* OS::AllocateShared(void* address, size_t size,
263 OS::MemoryPermission access,
264 PlatformSharedMemoryHandle handle, uint64_t offset) {
265 PlacementMode placement =
266 address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
267 zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
268 return MapVmo(*zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(),
269 address, *vmo, offset, placement, size, AllocatePageSize(),
270 access);
271 }
272
273 // static
FreeShared(void * address,size_t size)274 void OS::FreeShared(void* address, size_t size) {
275 CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
276 }
277
278 // static
Release(void * address,size_t size)279 void OS::Release(void* address, size_t size) { Free(address, size); }
280
281 // static
SetPermissions(void * address,size_t size,MemoryPermission access)282 bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
283 return SetPermissionsInternal(*zx::vmar::root_self(), CommitPageSize(),
284 address, size, access);
285 }
286
287 // static
DiscardSystemPages(void * address,size_t size)288 bool OS::DiscardSystemPages(void* address, size_t size) {
289 return DiscardSystemPagesInternal(*zx::vmar::root_self(), CommitPageSize(),
290 address, size);
291 }
292
293 // static
DecommitPages(void * address,size_t size)294 bool OS::DecommitPages(void* address, size_t size) {
295 // We rely on DiscardSystemPages decommitting the pages immediately (via
296 // ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized
297 // should they be accessed again later on.
298 return SetPermissions(address, size, MemoryPermission::kNoAccess) &&
299 DiscardSystemPages(address, size);
300 }
301
302 // static
CanReserveAddressSpace()303 bool OS::CanReserveAddressSpace() { return true; }
304
305 // static
CreateAddressSpaceReservation(void * hint,size_t size,size_t alignment,MemoryPermission max_permission)306 Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
307 void* hint, size_t size, size_t alignment,
308 MemoryPermission max_permission) {
309 DCHECK_EQ(0, reinterpret_cast<Address>(hint) % alignment);
310 zx::vmar child;
311 zx_vaddr_t child_addr;
312 PlacementMode placement =
313 hint != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
314 zx_status_t status = CreateAddressSpaceReservationInternal(
315 *zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(), hint,
316 placement, size, alignment, max_permission, &child, &child_addr);
317 if (status != ZX_OK) return {};
318 return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
319 child.release());
320 }
321
322 // static
FreeAddressSpaceReservation(AddressSpaceReservation reservation)323 void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
324 // Destroy the vmar and release the handle.
325 zx::vmar vmar(reservation.vmar_);
326 CHECK_EQ(ZX_OK, vmar.destroy());
327 }
328
329 // static
CreateSharedMemoryHandleForTesting(size_t size)330 PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
331 zx::vmo vmo;
332 if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
333 return kInvalidSharedMemoryHandle;
334 }
335 return SharedMemoryHandleFromVMO(vmo.release());
336 }
337
338 // static
DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle)339 void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
340 DCHECK_NE(kInvalidSharedMemoryHandle, handle);
341 zx_handle_t vmo = VMOFromSharedMemoryHandle(handle);
342 zx_handle_close(vmo);
343 }
344
345 // static
HasLazyCommits()346 bool OS::HasLazyCommits() { return true; }
347
GetSharedLibraryAddresses()348 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
349 UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
350 }
351
SignalCodeMovingGC()352 void OS::SignalCodeMovingGC() {
353 UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
354 }
355
GetUserTime(uint32_t * secs,uint32_t * usecs)356 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
357 const auto kNanosPerMicrosecond = 1000ULL;
358 const auto kMicrosPerSecond = 1000000ULL;
359
360 zx_info_thread_stats_t info = {};
361 if (zx::thread::self()->get_info(ZX_INFO_THREAD_STATS, &info, sizeof(info),
362 nullptr, nullptr) != ZX_OK) {
363 return -1;
364 }
365
366 // First convert to microseconds, rounding up.
367 const uint64_t micros_since_thread_started =
368 (info.total_runtime + kNanosPerMicrosecond - 1ULL) / kNanosPerMicrosecond;
369
370 *secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
371 *usecs =
372 static_cast<uint32_t>(micros_since_thread_started % kMicrosPerSecond);
373 return 0;
374 }
375
AdjustSchedulingParams()376 void OS::AdjustSchedulingParams() {}
377
GetFreeMemoryRangesWithin(OS::Address boundary_start,OS::Address boundary_end,size_t minimum_size,size_t alignment)378 std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
379 OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
380 size_t alignment) {
381 return {};
382 }
383
CreateSubReservation(void * address,size_t size,OS::MemoryPermission max_permission)384 Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
385 void* address, size_t size, OS::MemoryPermission max_permission) {
386 DCHECK(Contains(address, size));
387
388 zx::vmar child;
389 zx_vaddr_t child_addr;
390 zx_status_t status = CreateAddressSpaceReservationInternal(
391 *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
392 PlacementMode::kFixed, size, OS::AllocatePageSize(), max_permission,
393 &child, &child_addr);
394 if (status != ZX_OK) return {};
395 DCHECK_EQ(reinterpret_cast<void*>(child_addr), address);
396 return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
397 child.release());
398 }
399
FreeSubReservation(AddressSpaceReservation reservation)400 bool AddressSpaceReservation::FreeSubReservation(
401 AddressSpaceReservation reservation) {
402 OS::FreeAddressSpaceReservation(reservation);
403 return true;
404 }
405
Allocate(void * address,size_t size,OS::MemoryPermission access)406 bool AddressSpaceReservation::Allocate(void* address, size_t size,
407 OS::MemoryPermission access) {
408 DCHECK(Contains(address, size));
409 void* allocation = CreateAndMapVmo(
410 *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
411 PlacementMode::kFixed, size, OS::AllocatePageSize(), access);
412 DCHECK(!allocation || allocation == address);
413 return allocation != nullptr;
414 }
415
Free(void * address,size_t size)416 bool AddressSpaceReservation::Free(void* address, size_t size) {
417 DCHECK(Contains(address, size));
418 return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
419 size);
420 }
421
AllocateShared(void * address,size_t size,OS::MemoryPermission access,PlatformSharedMemoryHandle handle,uint64_t offset)422 bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
423 OS::MemoryPermission access,
424 PlatformSharedMemoryHandle handle,
425 uint64_t offset) {
426 DCHECK(Contains(address, size));
427 zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
428 return MapVmo(*zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(),
429 address, *vmo, offset, PlacementMode::kFixed, size,
430 OS::AllocatePageSize(), access);
431 }
432
FreeShared(void * address,size_t size)433 bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
434 DCHECK(Contains(address, size));
435 return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
436 size);
437 }
438
SetPermissions(void * address,size_t size,OS::MemoryPermission access)439 bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
440 OS::MemoryPermission access) {
441 DCHECK(Contains(address, size));
442 return SetPermissionsInternal(*zx::unowned_vmar(vmar_), OS::CommitPageSize(),
443 address, size, access);
444 }
445
DiscardSystemPages(void * address,size_t size)446 bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
447 DCHECK(Contains(address, size));
448 return DiscardSystemPagesInternal(*zx::unowned_vmar(vmar_),
449 OS::CommitPageSize(), address, size);
450 }
451
DecommitPages(void * address,size_t size)452 bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
453 DCHECK(Contains(address, size));
454 // See comment in OS::DecommitPages.
455 return SetPermissions(address, size, OS::MemoryPermission::kNoAccess) &&
456 DiscardSystemPages(address, size);
457 }
458
459 } // namespace base
460 } // namespace v8
461