// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8config.h"  // NOLINT(build/include_directory)

#if !defined(CPPGC_CAGED_HEAP)
#error "Must be compiled with caged heap enabled"
#endif

#include "src/heap/cppgc/caged-heap.h"

#include "include/cppgc/internal/caged-heap-local-data.h"
#include "include/cppgc/platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/heap/cppgc/globals.h"

namespace cppgc {
namespace internal {

STATIC_ASSERT(api_constants::kCagedHeapReservationSize ==
              kCagedHeapReservationSize);
STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
              kCagedHeapReservationAlignment);

namespace {

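// Reserves the address-space range for the caged heap. Tries a small number
// of randomized, suitably aligned hints and aborts with a fatal OOM error if
// none of the reservations succeed.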
VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
  DCHECK_EQ(0u,
            kCagedHeapReservationSize % platform_allocator.AllocatePageSize());

  static constexpr size_t kAllocationTries = 4;
  for (size_t i = 0; i < kAllocationTries; ++i) {
    void* hint = reinterpret_cast<void*>(RoundDown(
        reinterpret_cast<uintptr_t>(platform_allocator.GetRandomMmapAddr()),
        kCagedHeapReservationAlignment));

    VirtualMemory memory(&platform_allocator, kCagedHeapReservationSize,
                         kCagedHeapReservationAlignment, hint);
    if (memory.IsReserved()) return memory;
  }

  FATAL("Fatal process out of memory: Failed to reserve memory for caged heap");
  UNREACHABLE();
}

}  // namespace

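// Sets up the caged heap: reserves the cage, places the CagedHeapLocalData at
// its base, and configures a BoundedPageAllocator over the remaining space.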
CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
    : reserved_area_(ReserveCagedHeap(platform_allocator)) {
  using CagedAddress = CagedHeap::AllocatorType::Address;

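  // Commit just enough pages at the start of the reservation to hold the
  // CagedHeapLocalData.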
  const bool is_not_oom = platform_allocator.SetPermissions(
      reserved_area_.address(),
      RoundUp(sizeof(CagedHeapLocalData), platform_allocator.CommitPageSize()),
      PageAllocator::kReadWrite);
  // Failing to commit the reservation means that we are out of memory.
  CHECK(is_not_oom);

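  // Construct the local data in the pages that were just committed at the
  // base of the cage.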
  new (reserved_area_.address())
      CagedHeapLocalData(heap_base, platform_allocator);

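  // The allocatable part of the cage starts at the first kPageSize-aligned
  // address past the local data.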
  const CagedAddress caged_heap_start =
      RoundUp(reinterpret_cast<CagedAddress>(reserved_area_.address()) +
                  sizeof(CagedHeapLocalData),
              kPageSize);
  const size_t local_data_size_with_padding =
      caged_heap_start -
      reinterpret_cast<CagedAddress>(reserved_area_.address());

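  // Serve all normal page allocations for this heap from the remainder of the
  // reservation; the chosen initialization mode requires allocated pages to
  // be zero-initialized.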
  bounded_allocator_ = std::make_unique<v8::base::BoundedPageAllocator>(
      &platform_allocator, caged_heap_start,
      reserved_area_.size() - local_data_size_with_padding, kPageSize,
      v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
}

}  // namespace internal
}  // namespace cppgc