// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/init/isolate-allocator.h"

#include "src/base/bounded-page-allocator.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
#include "src/sandbox/sandbox.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

#ifdef V8_COMPRESS_POINTERS
namespace {

20 // "IsolateRootBiasPage" is an optional region before the 4Gb aligned
21 // reservation. This "IsolateRootBiasPage" page is supposed to be used for
22 // storing part of the Isolate object when Isolate::isolate_root_bias() is
23 // not zero.
inline size_t GetIsolateRootBiasPageSize(
    v8::PageAllocator* platform_page_allocator) {
  return RoundUp(Isolate::isolate_root_bias(),
                 platform_page_allocator->AllocatePageSize());
}
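
// For example, under an assumed 4KB platform allocation page size, a nonzero
// root bias of up to 4KB rounds up to a single 4KB bias page, while a zero
// bias yields no bias page at all.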

}  // namespace

struct PtrComprCageReservationParams
    : public VirtualMemoryCage::ReservationParams {
  PtrComprCageReservationParams() {
    page_allocator = GetPlatformPageAllocator();

    // This is only used when there is a per-Isolate cage, in which case the
    // Isolate is allocated within the cage, and the Isolate root is also the
    // cage base.
    const size_t kIsolateRootBiasPageSize =
        COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
            ? GetIsolateRootBiasPageSize(page_allocator)
            : 0;
    reservation_size = kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
    base_alignment = kPtrComprCageBaseAlignment;
    base_bias_size = kIsolateRootBiasPageSize;

    // Simplify the BoundedPageAllocator's life by configuring it to use the
    // same page size as the Heap will use (MemoryChunk::kPageSize).
    page_size =
        RoundUp(size_t{1} << kPageSizeBits, page_allocator->AllocatePageSize());
    requested_start_hint =
        reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr());
  }
};
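
// An illustrative sketch of the resulting reservation in the per-Isolate
// cage case (sizes here are assumptions; the actual values are
// platform-dependent):
//
//   | bias page(s) | 4GB-aligned cage, kPtrComprCageReservationSize bytes |
//   ^ reservation  ^ cage base == Isolate root
//     start
//
// The Isolate object straddles the cage base: its first isolate_root_bias()
// bytes live in the bias page(s), so the Isolate root points into the middle
// of the Isolate object (see CommitPagesForIsolate() below).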
#endif  // V8_COMPRESS_POINTERS

#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(VirtualMemoryCage, GetProcessWidePtrComprCage)
}  // anonymous namespace

// static
void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
  if (std::shared_ptr<CodeRange> code_range =
          CodeRange::GetProcessWideCodeRange()) {
    code_range->Free();
  }
  GetProcessWidePtrComprCage()->Free();
}
#endif  // V8_COMPRESS_POINTERS_IN_SHARED_CAGE

// static
void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
  PtrComprCageReservationParams params;
  base::AddressRegion existing_reservation;
#ifdef V8_SANDBOX
  // For now, we allow the sandbox to be disabled even when compiling with
  // v8_enable_sandbox. This fallback will be disallowed in the future, at the
  // latest once sandboxed pointers are enabled.
  if (GetProcessWideSandbox()->is_disabled()) {
    CHECK(kAllowBackingStoresOutsideSandbox);
  } else {
    auto sandbox = GetProcessWideSandbox();
    CHECK(sandbox->is_initialized());
    // The pointer compression cage must be placed at the start of the sandbox.

    // TODO(chromium:12180): This currently assumes that no other pages were
    // allocated through the cage's page allocator in the meantime. In the
    // future, the cage initialization will happen just before this function
    // runs, so this will be guaranteed. Currently, however, it is possible
    // that the embedder accidentally uses the cage's page allocator prior to
    // initializing V8, in which case the CHECK below will likely fail.
    Address base = sandbox->address_space()->AllocatePages(
        sandbox->base(), params.reservation_size, params.base_alignment,
        PagePermissions::kNoAccess);
    CHECK_EQ(sandbox->base(), base);
    existing_reservation = base::AddressRegion(base, params.reservation_size);
    params.page_allocator = sandbox->page_allocator();
  }
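  // At this point, if the sandbox is active, |existing_reservation| covers
  // the first |params.reservation_size| bytes of the sandbox and the cage's
  // pages will be managed by the sandbox's page allocator; otherwise the
  // reservation below is made from scratch.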
#endif  // V8_SANDBOX
  if (!GetProcessWidePtrComprCage()->InitReservation(params,
                                                     existing_reservation)) {
    V8::FatalProcessOutOfMemory(
        nullptr,
        "Failed to reserve virtual memory for process-wide V8 "
        "pointer compression cage");
  }
#endif  // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
}

IsolateAllocator::IsolateAllocator() {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
  PtrComprCageReservationParams params;
  if (!isolate_ptr_compr_cage_.InitReservation(params)) {
    V8::FatalProcessOutOfMemory(
        nullptr,
        "Failed to reserve memory for Isolate V8 pointer compression cage");
  }
  page_allocator_ = isolate_ptr_compr_cage_.page_allocator();
  CommitPagesForIsolate();
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
  // Allocate Isolate in C++ heap when sharing a cage.
  CHECK(GetProcessWidePtrComprCage()->IsReserved());
  page_allocator_ = GetProcessWidePtrComprCage()->page_allocator();
  isolate_memory_ = ::operator new(sizeof(Isolate));
#else
  // Allocate Isolate in C++ heap.
  page_allocator_ = GetPlatformPageAllocator();
  isolate_memory_ = ::operator new(sizeof(Isolate));
#endif  // V8_COMPRESS_POINTERS

  CHECK_NOT_NULL(page_allocator_);
}
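
// In the per-Isolate cage configuration, |isolate_memory_| is set by
// CommitPagesForIsolate() to a committed region inside the cage; in the
// other two configurations it points at ordinary C++ heap storage. The
// destructor below therefore only calls ::operator delete when no cage
// reservation exists.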
IsolateAllocator::~IsolateAllocator() {
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
  if (isolate_ptr_compr_cage_.reservation()->IsReserved()) {
    // The actual memory will be freed when |isolate_ptr_compr_cage_| dies.
    return;
  }
#endif

  // The memory was allocated in the C++ heap.
  ::operator delete(isolate_memory_);
}

VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
  return &isolate_ptr_compr_cage_;
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
  return GetProcessWidePtrComprCage();
#else
  return nullptr;
#endif
}

const VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() const {
  return const_cast<IsolateAllocator*>(this)->GetPtrComprCage();
}

#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
void IsolateAllocator::CommitPagesForIsolate() {
  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();

  CHECK(isolate_ptr_compr_cage_.IsReserved());
  Address isolate_root = isolate_ptr_compr_cage_.base();
  CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
  CHECK_GE(isolate_ptr_compr_cage_.reservation()->size(),
           kPtrComprCageReservationSize +
               GetIsolateRootBiasPageSize(platform_page_allocator));
  CHECK(isolate_ptr_compr_cage_.reservation()->InVM(
      isolate_root, kPtrComprCageReservationSize));

  size_t page_size = page_allocator_->AllocatePageSize();
  Address isolate_address = isolate_root - Isolate::isolate_root_bias();
  Address isolate_end = isolate_address + sizeof(Isolate);

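  // Illustrative numbers (assumptions, not guarantees): with 4KB commit
  // pages and 256KB allocatable pages, the first block below marks every
  // 256KB page from |isolate_root| up to the boundary past |isolate_end| as
  // allocated in the BoundedPageAllocator, while the second block commits
  // only the 4KB pages actually overlapping [isolate_address, isolate_end).
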
  // Inform the bounded page allocator about reserved pages.
  {
    Address reserved_region_address = isolate_root;
    size_t reserved_region_size =
        RoundUp(isolate_end, page_size) - reserved_region_address;

    CHECK(isolate_ptr_compr_cage_.page_allocator()->AllocatePagesAt(
        reserved_region_address, reserved_region_size,
        PageAllocator::Permission::kNoAccess));
  }

  // Commit pages where the Isolate will be stored.
  {
    size_t commit_page_size = platform_page_allocator->CommitPageSize();
    Address committed_region_address =
        RoundDown(isolate_address, commit_page_size);
    size_t committed_region_size =
        RoundUp(isolate_end, commit_page_size) - committed_region_address;

    // We use |isolate_ptr_compr_cage_.reservation()| directly here because
    // |page_allocator_| has a bigger commit page size than we actually need.
    CHECK(isolate_ptr_compr_cage_.reservation()->SetPermissions(
        committed_region_address, committed_region_size,
        PageAllocator::kReadWrite));

    if (Heap::ShouldZapGarbage()) {
      MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
                    kZapValue, committed_region_size / kSystemPointerSize);
    }
  }
  isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE

}  // namespace internal
}  // namespace v8