// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>

#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"

namespace v8 {
namespace internal {
namespace wasm {

namespace {

constexpr size_t kNegativeGuardSize = 1u << 31;  // 2GiB

void AddAllocationStatusSample(Isolate* isolate,
                               WasmMemoryTracker::AllocationStatus status) {
  isolate->counters()->wasm_memory_allocation_result()->AddSample(
      static_cast<int>(status));
}

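// Tries to allocate the backing store for a wasm memory of `size` bytes:
// reserves address space budget with the WasmMemoryTracker, maps the whole
// reservation as inaccessible pages, commits the first `size` bytes (rounded
// up to wasm pages) as read-write, and registers the allocation. Returns a
// pointer to the usable memory (offset into the reservation when full guard
// regions are used), or nullptr on failure. `allocation_base` and
// `allocation_length` receive the actual reservation so the caller can free
// it later.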
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                              size_t size, bool require_full_guard_regions,
                              void** allocation_base,
                              size_t* allocation_length) {
  using AllocationStatus = WasmMemoryTracker::AllocationStatus;
#if V8_TARGET_ARCH_32_BIT
  DCHECK(!require_full_guard_regions);
#endif
  // We always allocate the largest possible offset into the heap, so the
  // addressable memory after the guard page can be made inaccessible.
  //
  // To protect against 32-bit integer overflow issues, we also protect the
  // 2GiB before the valid part of the memory buffer.
  // TODO(7881): do not use static_cast<uint32_t>() here
  *allocation_length =
      require_full_guard_regions
          ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
          : RoundUp(
                base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
                kWasmPageSize);
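  // With full guard regions the reservation therefore looks like:
  //   [ 2 GiB negative guard | first `size` bytes, later committed
  //     read-write | inaccessible pages up to kWasmMaxHeapOffset ]
  // and the returned pointer is kNegativeGuardSize bytes past allocation_base.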
  DCHECK_GE(*allocation_length, size);
  DCHECK_GE(*allocation_length, kWasmPageSize);

  // Let the WasmMemoryTracker know we are going to reserve a bunch of
  // address space.
  // Try up to three times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC may be incremental and may leave
  // floating garbage.
  static constexpr int kAllocationRetries = 2;
  bool did_retry = false;
  for (int trial = 0;; ++trial) {
    if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
    did_retry = true;
    // After first and second GC: retry.
    if (trial == kAllocationRetries) {
      // We are over the address space limit. Fail.
      //
      // When running under the correctness fuzzer (i.e.
      // --abort-on-stack-or-string-length-overflow is present), we crash
      // instead so it is not incorrectly reported as a correctness violation.
      // See https://crbug.com/828293#c4
      if (FLAG_abort_on_stack_or_string_length_overflow) {
        FATAL("could not allocate wasm memory");
      }
      AddAllocationStatusSample(
          heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
      return nullptr;
    }
    // Collect garbage and retry.
    heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
  }

  // The pages are allocated inaccessible (kNoAccess) by default; only the part
  // that is actually used is made read-write below.
  DCHECK_NULL(*allocation_base);
  for (int trial = 0;; ++trial) {
    *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
                                     PageAllocator::kNoAccess);
    if (*allocation_base != nullptr) break;
    if (trial == kAllocationRetries) {
      memory_tracker->ReleaseReservation(*allocation_length);
      AddAllocationStatusSample(heap->isolate(),
                                AllocationStatus::kOtherFailure);
      return nullptr;
    }
    heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
  }
  byte* memory = reinterpret_cast<byte*>(*allocation_base);
  if (require_full_guard_regions) {
    memory += kNegativeGuardSize;
  }

  // Make the part we care about accessible.
  if (size > 0) {
    bool result = SetPermissions(memory, RoundUp(size, kWasmPageSize),
                                 PageAllocator::kReadWrite);
    // SetPermissions commits the extra memory, which may put us over the
    // process memory limit. If so, report this as an OOM.
    if (!result) {
      V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
    }
  }

  memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
                                     *allocation_length, memory, size);
  AddAllocationStatusSample(heap->isolate(),
                            did_retry ? AllocationStatus::kSuccessAfterRetry
                                      : AllocationStatus::kSuccess);
  return memory;
}
}  // namespace

WasmMemoryTracker::~WasmMemoryTracker() {
  // All reserved address space should be released before the allocation
  // tracker is destroyed.
  DCHECK_EQ(reserved_address_space_, 0u);
  DCHECK_EQ(allocated_address_space_, 0u);
}

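// Reserves `num_bytes` of the engine-wide address space budget. Returns false
// once the platform-dependent limit below would be exceeded; callers typically
// trigger a GC and retry (see TryAllocateBackingStore).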
bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
  // Address space reservations are currently only meaningful using guard
  // regions, which are only supported on 64-bit systems. On other platforms,
  // we always fall back on bounds checks.
#if V8_TARGET_ARCH_MIPS64
  // MIPS64 has a user space of 2^40 bytes on most processors, so the address
  // space limit needs to be smaller.
  constexpr size_t kAddressSpaceLimit = 0x2100000000L;  // 132 GiB
#elif V8_TARGET_ARCH_64_BIT
  // We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
  // once we fill everything up with full-sized guard regions.
  constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
#else
  constexpr size_t kAddressSpaceLimit = 0x90000000;  // 2 GiB + 256 MiB
#endif

  int retries = 5;  // cmpxchg can fail, so retry a bounded number of times.
  do {
    size_t old_count = reserved_address_space_;
    if ((kAddressSpaceLimit - old_count) < num_bytes) return false;
    if (reserved_address_space_.compare_exchange_weak(old_count,
                                                      old_count + num_bytes)) {
      return true;
    }
  } while (retries-- > 0);

  return false;
}

void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
  size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
  USE(old_reserved);
  DCHECK_LE(num_bytes, old_reserved);
}

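// Records a completed allocation under the tracker's mutex, keyed by the start
// of the usable buffer, and samples the new address space usage.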
void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
                                           void* allocation_base,
                                           size_t allocation_length,
                                           void* buffer_start,
                                           size_t buffer_length) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);

  allocated_address_space_ += allocation_length;
  AddAddressSpaceSample(isolate);

  allocations_.emplace(buffer_start,
                       AllocationData{allocation_base, allocation_length,
                                      buffer_start, buffer_length});
}

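// Removes the tracking entry for `buffer_start` and returns its AllocationData
// so the caller can free the underlying pages. `isolate` may be nullptr when
// the embedder releases an allocation that was shared between isolates.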
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
    Isolate* isolate, const void* buffer_start) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);

  auto find_result = allocations_.find(buffer_start);
  CHECK_NE(find_result, allocations_.end());

  if (find_result != allocations_.end()) {
    size_t num_bytes = find_result->second.allocation_length;
    DCHECK_LE(num_bytes, reserved_address_space_);
    DCHECK_LE(num_bytes, allocated_address_space_);
    reserved_address_space_ -= num_bytes;
    allocated_address_space_ -= num_bytes;
    // ReleaseAllocation might be called with a nullptr as isolate if the
    // embedder is releasing the allocation and not a specific isolate. This
    // happens if the allocation was shared between multiple isolates
    // (threads).
    if (isolate) AddAddressSpaceSample(isolate);

    AllocationData allocation_data = find_result->second;
    allocations_.erase(find_result);
    return allocation_data;
  }
  UNREACHABLE();
}

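// Returns the AllocationData registered for `buffer_start`, or nullptr if the
// pointer is not the start of a tracked wasm memory.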
const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
    const void* buffer_start) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);
  const auto& result = allocations_.find(buffer_start);
  if (result != allocations_.end()) {
    return &result->second;
  }
  return nullptr;
}

bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);
  return allocations_.find(buffer_start) != allocations_.end();
}

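// A buffer has full guard regions if its reservation extends at least
// kWasmMaxHeapOffset bytes beyond the start of the usable memory, so that
// out-of-bounds accesses at any valid 32-bit offset hit inaccessible pages.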
bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);
  const auto allocation = allocations_.find(buffer_start);

  if (allocation == allocations_.end()) {
    return false;
  }

  Address start = reinterpret_cast<Address>(buffer_start);
  Address limit =
      reinterpret_cast<Address>(allocation->second.allocation_base) +
      allocation->second.allocation_length;
  return start + kWasmMaxHeapOffset < limit;
}

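// If `buffer_start` belongs to a tracked wasm memory, releases the tracking
// entry and frees the underlying pages. Returns whether anything was freed.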
bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
                                                 const void* buffer_start) {
  if (IsWasmMemory(buffer_start)) {
    const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
    CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
    return true;
  }
  return false;
}

void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) {
  // Report address space usage in MiB so the full range fits in an int on all
  // platforms.
  isolate->counters()->wasm_address_space_usage_mb()->AddSample(
      static_cast<int>(allocated_address_space_ >> 20));
}

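// Wraps an existing backing store in a JSArrayBuffer that is marked as wasm
// memory: not neuterable by user code, but growable.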
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
                                       size_t size, bool is_external,
                                       SharedFlag shared) {
  Handle<JSArrayBuffer> buffer =
      isolate->factory()->NewJSArrayBuffer(shared, TENURED);
  constexpr bool is_wasm_memory = true;
  JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
                       shared, is_wasm_memory);
  buffer->set_is_neuterable(false);
  buffer->set_is_growable(true);
  return buffer;
}

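// Allocates a zero-initialized backing store of `size` bytes (subject to
// kV8MaxWasmMemoryBytes and FLAG_wasm_max_mem_pages) and wraps it in a
// JSArrayBuffer. Returns an empty handle if the size is too large or the
// allocation fails.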
MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
                                          SharedFlag shared) {
  // Enforce engine-limited maximum allocation size.
  if (size > kV8MaxWasmMemoryBytes) return {};
  // Enforce flag-limited maximum allocation size.
  if (size > (FLAG_wasm_max_mem_pages * uint64_t{kWasmPageSize})) return {};

  WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();

  // Set by TryAllocateBackingStore.
  void* allocation_base = nullptr;
  size_t allocation_length = 0;

#if V8_TARGET_ARCH_64_BIT
  bool require_full_guard_regions = true;
#else
  bool require_full_guard_regions = false;
#endif
  void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
                                         require_full_guard_regions,
                                         &allocation_base, &allocation_length);
  if (memory == nullptr && FLAG_wasm_trap_handler_fallback) {
    // If we failed to allocate with full guard regions, fall back on
    // mini-guards.
    require_full_guard_regions = false;
    memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
                                     require_full_guard_regions,
                                     &allocation_base, &allocation_length);
  }
  if (memory == nullptr) {
    return {};
  }

#if DEBUG
  // Double check the API allocator actually zero-initialized the memory.
  const byte* bytes = reinterpret_cast<const byte*>(memory);
  for (size_t i = 0; i < size; ++i) {
    DCHECK_EQ(0, bytes[i]);
  }
#endif

  reinterpret_cast<v8::Isolate*>(isolate)
      ->AdjustAmountOfExternalAllocatedMemory(size);

  constexpr bool is_external = false;
  return SetupArrayBuffer(isolate, memory, size, is_external, shared);
}

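// Detaches `buffer` from its backing store and neuters it. Shared buffers are
// never detached. The backing memory is only freed here when `free_memory` is
// set and the buffer is not already external.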
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
                        bool free_memory) {
  if (buffer->is_shared()) return;  // Detaching shared buffers is impossible.
  DCHECK(!buffer->is_neuterable());

  const bool is_external = buffer->is_external();
  DCHECK(!buffer->is_neuterable());
  if (!is_external) {
    buffer->set_is_external(true);
    isolate->heap()->UnregisterArrayBuffer(*buffer);
    if (free_memory) {
      // We need to free the memory before neutering the buffer because
      // FreeBackingStore reads buffer->allocation_base(), which is nulled out
      // by Neuter. This means there is a dangling pointer until we neuter the
      // buffer. Since there is no way for the user to directly call
      // FreeBackingStore, we can ensure this is safe.
      buffer->FreeBackingStoreFromMainThread();
    }
  }

  DCHECK(buffer->is_external());
  buffer->set_is_wasm_memory(false);
  buffer->set_is_neuterable(true);
  buffer->Neuter();
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8