// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/backing-store.h"

#include <cstring>

#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"

#define TRACE_BS(...)                                  \
  do {                                                 \
    if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {

namespace {
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors, so the
// address space limit needs to be smaller.
constexpr size_t kAddressSpaceLimit = 0x8000000000L;  // 512 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
#else
constexpr size_t kAddressSpaceLimit = 0xC0000000;  // 3 GiB
#endif

constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;

#if V8_TARGET_ARCH_64_BIT
constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif
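
// (The 10 GiB full guard size matches the layout sketched in
// GetReservedRegion() below: a 2 GiB negative guard region, 4 GiB of
// addressable memory, and a 4 GiB positive guard region.)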

std::atomic<uint64_t> reserved_address_space_{0};

// Allocation results are reported to UMA
//
// See wasm_memory_allocation_result in counters.h
enum class AllocationStatus {
  kSuccess,  // Succeeded on the first try

  kSuccessAfterRetry,  // Succeeded after garbage collection

  kAddressSpaceLimitReachedFailure,  // Failed because Wasm is at its address
                                     // space limit

  kOtherFailure  // Failed for an unknown reason
};

base::AddressRegion GetReservedRegion(bool has_guard_regions,
                                      void* buffer_start,
                                      size_t byte_capacity) {
#if V8_TARGET_ARCH_64_BIT
  if (has_guard_regions) {
    // Guard regions always look like this:
    // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
    //              ^ buffer_start
    //                              ^ byte_length
    // ^ negative guard region           ^ positive guard region

    Address start = reinterpret_cast<Address>(buffer_start);
    DCHECK_EQ(8, sizeof(size_t));  // only use on 64-bit
    DCHECK_EQ(0, start % AllocatePageSize());
    return base::AddressRegion(start - kNegativeGuardSize,
                               static_cast<size_t>(kFullGuardSize));
  }
#endif

  DCHECK(!has_guard_regions);
  return base::AddressRegion(reinterpret_cast<Address>(buffer_start),
                             byte_capacity);
}

size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) {
#if V8_TARGET_ARCH_64_BIT
  if (has_guard_regions) return kFullGuardSize;
#else
  DCHECK(!has_guard_regions);
#endif

  return byte_capacity;
}

void RecordStatus(Isolate* isolate, AllocationStatus status) {
  isolate->counters()->wasm_memory_allocation_result()->AddSample(
      static_cast<int>(status));
}

inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
  // Double check memory is zero-initialized. Despite being DEBUG-only,
  // this function is somewhat optimized for the benefit of test suite
  // execution times (some tests allocate several gigabytes).
  const byte* bytes = reinterpret_cast<const byte*>(start);
  const size_t kBaseCase = 32;
  for (size_t i = 0; i < kBaseCase && i < byte_length; i++) {
    DCHECK_EQ(0, bytes[i]);
  }
  // Having checked the first kBaseCase bytes to be zero, we can now use
  // {memcmp} to compare the range against itself shifted by that amount,
  // thereby inductively checking the remaining bytes.
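  // (Illustrative step: with kBaseCase == 32, a successful memcmp implies
  // bytes[i] == bytes[i + 32] for every i, so e.g. byte 40 equals byte 8,
  // which was already checked to be zero above.)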
  if (byte_length > kBaseCase) {
    DCHECK_EQ(0, memcmp(bytes, bytes + kBaseCase, byte_length - kBaseCase));
  }
#endif
}
}  // namespace

bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
  uint64_t reservation_limit = kAddressSpaceLimit;
  uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
  while (true) {
    if (old_count > reservation_limit) return false;
    if (reservation_limit - old_count < num_bytes) return false;
    if (reserved_address_space_.compare_exchange_weak(
            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
      return true;
    }
  }
}
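
// (For scale: with the 64-bit non-MIPS limit of 1 TiB + 4 GiB and a 10 GiB
// reservation per guarded Wasm memory, roughly 100 guarded memories can be
// reserved per process before this accounting makes reservations fail.)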

void BackingStore::ReleaseReservation(uint64_t num_bytes) {
  uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
  USE(old_reserved);
  DCHECK_LE(num_bytes, old_reserved);
}

// The backing store for a Wasm shared memory remembers all the isolates
// with which it has been shared.
struct SharedWasmMemoryData {
  std::vector<Isolate*> isolates_;
};

void BackingStore::Clear() {
  buffer_start_ = nullptr;
  byte_length_ = 0;
  has_guard_regions_ = false;
  if (holds_shared_ptr_to_allocator_) {
    type_specific_data_.v8_api_array_buffer_allocator_shared
        .std::shared_ptr<v8::ArrayBuffer::Allocator>::~shared_ptr();
    holds_shared_ptr_to_allocator_ = false;
  }
  type_specific_data_.v8_api_array_buffer_allocator = nullptr;
}

BackingStore::~BackingStore() {
  GlobalBackingStoreRegistry::Unregister(this);

  if (buffer_start_ == nullptr) {
    Clear();
    return;
  }

  if (is_wasm_memory_) {
    DCHECK(free_on_destruct_);
    DCHECK(!custom_deleter_);
    size_t reservation_size =
        GetReservationSize(has_guard_regions_, byte_capacity_);
    TRACE_BS(
        "BSw:free  bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
        this, buffer_start_, byte_length(), byte_capacity_, reservation_size);
    if (is_shared_) {
      // Deallocate the list of attached memory objects.
      SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data();
      delete shared_data;
      type_specific_data_.shared_wasm_memory_data = nullptr;
    }

    // Wasm memories are always allocated through the page allocator.
    auto region =
        GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);

    bool pages_were_freed =
        region.size() == 0 /* no need to free any pages */ ||
        FreePages(GetPlatformPageAllocator(),
                  reinterpret_cast<void*>(region.begin()), region.size());
    CHECK(pages_were_freed);
    BackingStore::ReleaseReservation(reservation_size);
    Clear();
    return;
  }
  if (custom_deleter_) {
    DCHECK(free_on_destruct_);
    TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
             this, buffer_start_, byte_length(), byte_capacity_);
    type_specific_data_.deleter.callback(buffer_start_, byte_length_,
                                         type_specific_data_.deleter.data);
    Clear();
    return;
  }
  if (free_on_destruct_) {
    // JSArrayBuffer backing store. Deallocate through the embedder's allocator.
    auto allocator = get_v8_api_array_buffer_allocator();
    TRACE_BS("BS:free   bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
             buffer_start_, byte_length(), byte_capacity_);
    allocator->Free(buffer_start_, byte_length_);
  }
  Clear();
}

// Allocate a backing store using the array buffer allocator from the embedder.
std::unique_ptr<BackingStore> BackingStore::Allocate(
    Isolate* isolate, size_t byte_length, SharedFlag shared,
    InitializedFlag initialized) {
  void* buffer_start = nullptr;
  auto allocator = isolate->array_buffer_allocator();
  CHECK_NOT_NULL(allocator);
  if (byte_length != 0) {
    auto counters = isolate->counters();
    int mb_length = static_cast<int>(byte_length / MB);
    if (mb_length > 0) {
      counters->array_buffer_big_allocations()->AddSample(mb_length);
    }
    if (shared == SharedFlag::kShared) {
      counters->shared_array_allocations()->AddSample(mb_length);
    }
    auto allocate_buffer = [allocator, initialized](size_t byte_length) {
      if (initialized == InitializedFlag::kUninitialized) {
        return allocator->AllocateUninitialized(byte_length);
      }
      void* buffer_start = allocator->Allocate(byte_length);
      if (buffer_start) {
        // TODO(wasm): node does not implement the zero-initialization API.
        // Reenable this debug check when node does implement it properly.
        constexpr bool
            kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true;
        if ((!(kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI)) &&
            !FLAG_mock_arraybuffer_allocator) {
          DebugCheckZero(buffer_start, byte_length);
        }
      }
      return buffer_start;
    };

    buffer_start = isolate->heap()->AllocateExternalBackingStore(
        allocate_buffer, byte_length);

    if (buffer_start == nullptr) {
      // Allocation failed.
      counters->array_buffer_new_size_failures()->AddSample(mb_length);
      return {};
    }
  }

  auto result = new BackingStore(buffer_start,  // start
                                 byte_length,   // length
                                 byte_length,   // capacity
                                 shared,        // shared
                                 false,         // is_wasm_memory
                                 true,          // free_on_destruct
                                 false,         // has_guard_regions
                                 false,         // custom_deleter
                                 false);        // empty_deleter

  TRACE_BS("BS:alloc  bs=%p mem=%p (length=%zu)\n", result,
           result->buffer_start(), byte_length);
  result->SetAllocatorFromIsolate(isolate);
  return std::unique_ptr<BackingStore>(result);
}
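
// (Illustrative usage, not from this file: a 1 MiB JSArrayBuffer backing
// store could be obtained with something like
//   BackingStore::Allocate(isolate, 1 * MB, SharedFlag::kNotShared,
//                          InitializedFlag::kUninitialized);
// the returned unique_ptr owns the buffer and frees it through the embedder's
// allocator on destruction.)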

// Trying to allocate 4 GiB on a 32-bit platform is guaranteed to fail.
// We don't lower the official max_mem_pages() limit because that would be
// observable upon instantiation; this way the effective limit on 32-bit
// platforms is defined by the allocator.
constexpr size_t kPlatformMaxPages =
    std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
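// (On a 32-bit platform, with 64 KiB Wasm pages this evaluates to 65535
// pages, i.e. just under 4 GiB; on 64-bit platforms it is far larger than
// any engine limit.)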

void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
  if (auto allocator_shared = isolate->array_buffer_allocator_shared()) {
    holds_shared_ptr_to_allocator_ = true;
    new (&type_specific_data_.v8_api_array_buffer_allocator_shared)
        std::shared_ptr<v8::ArrayBuffer::Allocator>(
            std::move(allocator_shared));
  } else {
    type_specific_data_.v8_api_array_buffer_allocator =
        isolate->array_buffer_allocator();
  }
}

// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
    Isolate* isolate, size_t initial_pages, size_t maximum_pages,
    SharedFlag shared) {
  // Cannot reserve 0 pages on some OSes.
  if (maximum_pages == 0) maximum_pages = 1;

  TRACE_BS("BSw:try   %zu pages, %zu max\n", initial_pages, maximum_pages);

  bool guards = trap_handler::IsTrapHandlerEnabled();

  // For accounting purposes, whether a GC was necessary.
  bool did_retry = false;

  // A helper to try running a function up to 3 times, executing a GC
  // if the first and second attempts failed.
  auto gc_retry = [&](const std::function<bool()>& fn) {
    for (int i = 0; i < 3; i++) {
      if (fn()) return true;
      // Collect garbage and retry.
      did_retry = true;
      // TODO(wasm): try Heap::EagerlyFreeExternalMemory() first?
      isolate->heap()->MemoryPressureNotification(
          MemoryPressureLevel::kCritical, true);
    }
    return false;
  };

  // Compute size of reserved memory.

  size_t engine_max_pages = wasm::max_mem_pages();
  maximum_pages = std::min(engine_max_pages, maximum_pages);
  // If the platform doesn't support so many pages, attempting to allocate
  // is guaranteed to fail, so we don't even try.
  if (maximum_pages > kPlatformMaxPages) return {};
  CHECK_LE(maximum_pages,
           std::numeric_limits<size_t>::max() / wasm::kWasmPageSize);
  size_t byte_capacity = maximum_pages * wasm::kWasmPageSize;
  size_t reservation_size = GetReservationSize(guards, byte_capacity);
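  // (For example: a maximum of 16384 pages gives byte_capacity = 1 GiB; with
  // trap-handler guard regions the reservation is nevertheless the full
  // 10 GiB guard size, without them it is just the 1 GiB capacity.)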

  //--------------------------------------------------------------------------
  // 1. Enforce maximum address space reservation per engine.
  //--------------------------------------------------------------------------
  auto reserve_memory_space = [&] {
    return BackingStore::ReserveAddressSpace(reservation_size);
  };

  if (!gc_retry(reserve_memory_space)) {
    // Crash on out-of-memory if the correctness fuzzer is running.
    if (FLAG_correctness_fuzzer_suppressions) {
      FATAL("could not allocate wasm memory backing store");
    }
    RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
    TRACE_BS("BSw:try   failed to reserve address space (size %zu)\n",
             reservation_size);
    return {};
  }

  //--------------------------------------------------------------------------
  // 2. Allocate pages (inaccessible by default).
  //--------------------------------------------------------------------------
  void* allocation_base = nullptr;
  auto allocate_pages = [&] {
    allocation_base =
        AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
                      wasm::kWasmPageSize, PageAllocator::kNoAccess);
    return allocation_base != nullptr;
  };
  if (!gc_retry(allocate_pages)) {
    // Page allocator could not reserve enough pages.
    BackingStore::ReleaseReservation(reservation_size);
    RecordStatus(isolate, AllocationStatus::kOtherFailure);
    TRACE_BS("BSw:try   failed to allocate pages\n");
    return {};
  }

  // Get a pointer to the start of the buffer, skipping negative guard region
  // if necessary.
  byte* buffer_start = reinterpret_cast<byte*>(allocation_base) +
                       (guards ? kNegativeGuardSize : 0);

  //--------------------------------------------------------------------------
  // 3. Commit the initial pages (allow read/write).
  //--------------------------------------------------------------------------
  size_t byte_length = initial_pages * wasm::kWasmPageSize;
  auto commit_memory = [&] {
    return byte_length == 0 ||
           SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length,
                          PageAllocator::kReadWrite);
  };
  if (!gc_retry(commit_memory)) {
    TRACE_BS("BSw:try   failed to set permissions (%p, %zu)\n", buffer_start,
             byte_length);
    // SetPermissions put us over the process memory limit.
    V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
  }

  DebugCheckZero(buffer_start, byte_length);  // touch the bytes.

  RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
                                  : AllocationStatus::kSuccess);

  auto result = new BackingStore(buffer_start,   // start
                                 byte_length,    // length
                                 byte_capacity,  // capacity
                                 shared,         // shared
                                 true,           // is_wasm_memory
                                 true,           // free_on_destruct
                                 guards,         // has_guard_regions
                                 false,          // custom_deleter
                                 false);         // empty_deleter

  TRACE_BS(
      "BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
      result, result->buffer_start(), byte_length, byte_capacity,
      reservation_size);

  // Shared Wasm memories need an anchor for the memory object list.
  if (shared == SharedFlag::kShared) {
    result->type_specific_data_.shared_wasm_memory_data =
        new SharedWasmMemoryData();
  }

  return std::unique_ptr<BackingStore>(result);
}

// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
    Isolate* isolate, size_t initial_pages, size_t maximum_pages,
    SharedFlag shared) {
  // Wasm pages must be a multiple of the allocation page size.
  DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize());

  // Enforce engine limitation on the maximum number of pages.
  if (initial_pages > wasm::kV8MaxWasmMemoryPages) return nullptr;
  if (initial_pages > kPlatformMaxPages) return nullptr;

  auto backing_store =
      TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared);
  if (maximum_pages == initial_pages) {
    // If the initial and maximum page counts are equal, there is nothing more
    // to do; return early.
    return backing_store;
  }

  // Retry the allocation with a progressively smaller maximum each time.
  const int kAllocationTries = 3;
  auto delta = (maximum_pages - initial_pages) / (kAllocationTries + 1);
  size_t sizes[] = {maximum_pages - delta, maximum_pages - 2 * delta,
                    maximum_pages - 3 * delta, initial_pages};
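  // (For example, with initial_pages = 10 and maximum_pages = 110 the delta
  // is 25 and the retry maxima are {85, 60, 35, 10}.)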

  for (size_t i = 0; i < arraysize(sizes) && !backing_store; i++) {
    backing_store =
        TryAllocateWasmMemory(isolate, initial_pages, sizes[i], shared);
  }
  return backing_store;
}

std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
                                                           size_t new_pages) {
  // Note that we could allocate uninitialized to save initialization cost here,
  // but since Wasm memories are allocated by the page allocator, the zeroing
  // cost is already built-in.
  // TODO(titzer): should we use a suitable maximum here?
  auto new_backing_store = BackingStore::AllocateWasmMemory(
      isolate, new_pages, new_pages,
      is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);

  if (!new_backing_store ||
      new_backing_store->has_guard_regions() != has_guard_regions_) {
    return {};
  }

  if (byte_length_ > 0) {
    // If the allocation was successful, then the new buffer must be at least
    // as big as the old one.
    DCHECK_GE(new_pages * wasm::kWasmPageSize, byte_length_);
    memcpy(new_backing_store->buffer_start(), buffer_start_, byte_length_);
  }

  return new_backing_store;
}

// Try to grow the size of a wasm memory in place, without realloc + copy.
base::Optional<size_t> BackingStore::GrowWasmMemoryInPlace(Isolate* isolate,
                                                           size_t delta_pages,
                                                           size_t max_pages) {
  // This function grows wasm memory by
  // * changing the permissions of additional {delta_pages} pages to
  //   kReadWrite;
  // * incrementing {byte_length_}.
  //
  // As this code is executed concurrently, the following steps are executed:
  // 1) Read the current value of {byte_length_};
  // 2) Change the permission of all pages from {buffer_start_} to
  //    {byte_length_} + {delta_pages} * {page_size} to kReadWrite;
  //    * This operation may race with other concurrent grows; the OS takes
  //      care of synchronization.
  // 3) Try to update {byte_length_} with a compare_exchange;
  // 4) Repeat 1) to 3) until the compare_exchange in 3) succeeds.
  //
  // The result of this function is the {byte_length_} before growing,
  // expressed in pages. It behaves like the result of an RMW update on
  // {byte_length_}, i.e. two concurrent calls to this function will result in
  // different return values if {delta_pages} != 0.
  //
  // Invariants:
  // * Permissions are always set incrementally, i.e. for any page {b} with
  //   kReadWrite permission, all pages between the first page {a} and page {b}
  //   also have kReadWrite permission.
  // * {byte_length_} is always less than or equal to the amount of memory with
  //   permissions set to kReadWrite.
  //     * This is guaranteed by incrementing {byte_length_} with a
  //       compare_exchange after changing the permissions.
  //     * This invariant is the reason why we cannot use a fetch_add.
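  // (Worked example: starting at 2 pages, two threads each grow by 1 page.
  // Both may set kReadWrite permissions on at least 3 pages, but the
  // compare_exchange serializes the length updates, so one call returns 2,
  // the other returns 3, and {byte_length_} ends up at 4 pages.)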
  DCHECK(is_wasm_memory_);
  max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize);

  // Do a compare-exchange loop, because we also need to adjust page
  // permissions. Note that multiple racing grows both try to set page
  // permissions for the entire range (to be RW), so the operating system
  // should deal with that raciness. We know we succeeded when we can
  // compare/swap the old length with the new length.
  size_t old_length = byte_length_.load(std::memory_order_relaxed);

  if (delta_pages == 0)
    return {old_length / wasm::kWasmPageSize};  // degenerate grow.
  if (delta_pages > max_pages) return {};       // would never work.

  size_t new_length = 0;
  while (true) {
    size_t current_pages = old_length / wasm::kWasmPageSize;

    // Check if we have exceeded the supplied maximum.
    if (current_pages > (max_pages - delta_pages)) return {};

    new_length = (current_pages + delta_pages) * wasm::kWasmPageSize;

    // Try to adjust the permissions on the memory.
    if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
                           new_length, PageAllocator::kReadWrite)) {
      return {};
    }
    if (byte_length_.compare_exchange_weak(old_length, new_length,
                                           std::memory_order_acq_rel)) {
      // Successfully updated both the length and permissions.
      break;
    }
  }

  if (!is_shared_ && free_on_destruct_) {
    // Only do per-isolate accounting for non-shared backing stores.
    reinterpret_cast<v8::Isolate*>(isolate)
        ->AdjustAmountOfExternalAllocatedMemory(new_length - old_length);
  }
  return {old_length / wasm::kWasmPageSize};
}

void BackingStore::AttachSharedWasmMemoryObject(
    Isolate* isolate, Handle<WasmMemoryObject> memory_object) {
  DCHECK(is_wasm_memory_);
  DCHECK(is_shared_);
  // We need to take the global registry lock for this operation.
  GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this,
                                                        memory_object);
}

void BackingStore::BroadcastSharedWasmMemoryGrow(
    Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
  GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(isolate,
                                                            backing_store);
}

void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
  GlobalBackingStoreRegistry::Purge(isolate);
}

void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
  GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate);
}

std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
    Isolate* isolate, void* allocation_base, size_t allocation_length,
    SharedFlag shared, bool free_on_destruct) {
  auto result = new BackingStore(allocation_base,    // start
                                 allocation_length,  // length
                                 allocation_length,  // capacity
                                 shared,             // shared
                                 false,              // is_wasm_memory
                                 free_on_destruct,   // free_on_destruct
                                 false,              // has_guard_regions
                                 false,              // custom_deleter
                                 false);             // empty_deleter
  result->SetAllocatorFromIsolate(isolate);
  TRACE_BS("BS:wrap   bs=%p mem=%p (length=%zu)\n", result,
           result->buffer_start(), result->byte_length());
  return std::unique_ptr<BackingStore>(result);
}

std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
    void* allocation_base, size_t allocation_length,
    v8::BackingStore::DeleterCallback deleter, void* deleter_data,
    SharedFlag shared) {
  bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
  auto result = new BackingStore(allocation_base,    // start
                                 allocation_length,  // length
                                 allocation_length,  // capacity
                                 shared,             // shared
                                 false,              // is_wasm_memory
                                 true,               // free_on_destruct
                                 false,              // has_guard_regions
                                 true,               // custom_deleter
                                 is_empty_deleter);  // empty_deleter
  result->type_specific_data_.deleter = {deleter, deleter_data};
  TRACE_BS("BS:wrap   bs=%p mem=%p (length=%zu)\n", result,
           result->buffer_start(), result->byte_length());
  return std::unique_ptr<BackingStore>(result);
}
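
// (Illustrative usage, not from this file: an embedder can hand a malloc'ed
// buffer to V8 and have it freed when the backing store dies, e.g.
//   auto deleter = [](void* data, size_t, void*) { free(data); };
//   auto store = BackingStore::WrapAllocation(buf, len, deleter, nullptr,
//                                             SharedFlag::kNotShared);
// The deleter callback receives {buffer_start_}, {byte_length_}, and the
// deleter_data pointer, as invoked in ~BackingStore() above.)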

std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
    SharedFlag shared) {
  auto result = new BackingStore(nullptr,  // start
                                 0,        // length
                                 0,        // capacity
                                 shared,   // shared
                                 false,    // is_wasm_memory
                                 true,     // free_on_destruct
                                 false,    // has_guard_regions
                                 false,    // custom_deleter
                                 false);   // empty_deleter

  return std::unique_ptr<BackingStore>(result);
}

bool BackingStore::Reallocate(Isolate* isolate, size_t new_byte_length) {
  CHECK(!is_wasm_memory_ && !custom_deleter_ && !globally_registered_ &&
        free_on_destruct_);
  auto allocator = get_v8_api_array_buffer_allocator();
  CHECK_EQ(isolate->array_buffer_allocator(), allocator);
  CHECK_EQ(byte_length_, byte_capacity_);
  void* new_start =
      allocator->Reallocate(buffer_start_, byte_length_, new_byte_length);
  if (!new_start) return false;
  buffer_start_ = new_start;
  byte_capacity_ = new_byte_length;
  byte_length_ = new_byte_length;
  return true;
}

v8::ArrayBuffer::Allocator* BackingStore::get_v8_api_array_buffer_allocator() {
  CHECK(!is_wasm_memory_);
  auto array_buffer_allocator =
      holds_shared_ptr_to_allocator_
          ? type_specific_data_.v8_api_array_buffer_allocator_shared.get()
          : type_specific_data_.v8_api_array_buffer_allocator;
  CHECK_NOT_NULL(array_buffer_allocator);
  return array_buffer_allocator;
}

SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() {
  CHECK(is_wasm_memory_ && is_shared_);
  auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data;
  CHECK(shared_wasm_memory_data);
  return shared_wasm_memory_data;
}

namespace {
// Implementation details of GlobalBackingStoreRegistry.
struct GlobalBackingStoreRegistryImpl {
  GlobalBackingStoreRegistryImpl() = default;
  base::Mutex mutex_;
  std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_;
};
base::LazyInstance<GlobalBackingStoreRegistryImpl>::type global_registry_impl_ =
    LAZY_INSTANCE_INITIALIZER;
inline GlobalBackingStoreRegistryImpl* impl() {
  return global_registry_impl_.Pointer();
}
}  // namespace

void GlobalBackingStoreRegistry::Register(
    std::shared_ptr<BackingStore> backing_store) {
  if (!backing_store || !backing_store->buffer_start()) return;

  if (!backing_store->free_on_destruct()) {
    // If the backing store buffer is managed by the embedder,
    // then we don't have to guarantee that there is a single unique
    // BackingStore per buffer_start(), because the destructor of
    // the BackingStore will be a no-op in that case.

    // All Wasm memory has to be registered.
    CHECK(!backing_store->is_wasm_memory());
    return;
  }

  base::MutexGuard scope_lock(&impl()->mutex_);
  if (backing_store->globally_registered_) return;
  TRACE_BS("BS:reg    bs=%p mem=%p (length=%zu, capacity=%zu)\n",
           backing_store.get(), backing_store->buffer_start(),
           backing_store->byte_length(), backing_store->byte_capacity());
  std::weak_ptr<BackingStore> weak = backing_store;
  auto result = impl()->map_.insert({backing_store->buffer_start(), weak});
  CHECK(result.second);
  backing_store->globally_registered_ = true;
}

void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
  if (!backing_store->globally_registered_) return;

  DCHECK_NOT_NULL(backing_store->buffer_start());

  base::MutexGuard scope_lock(&impl()->mutex_);
  const auto& result = impl()->map_.find(backing_store->buffer_start());
  if (result != impl()->map_.end()) {
    DCHECK(!result->second.lock());
    impl()->map_.erase(result);
  }
  backing_store->globally_registered_ = false;
}

std::shared_ptr<BackingStore> GlobalBackingStoreRegistry::Lookup(
    void* buffer_start, size_t length) {
  base::MutexGuard scope_lock(&impl()->mutex_);
  TRACE_BS("BS:lookup   mem=%p (%zu bytes)\n", buffer_start, length);
  const auto& result = impl()->map_.find(buffer_start);
  if (result == impl()->map_.end()) {
    return std::shared_ptr<BackingStore>();
  }
  auto backing_store = result->second.lock();
  CHECK_EQ(buffer_start, backing_store->buffer_start());
  if (backing_store->is_wasm_memory()) {
    // Grow calls on shared WebAssembly memories can be triggered from
    // different workers, so length equality cannot be guaranteed here.
    CHECK_LE(length, backing_store->byte_length());
  } else {
    CHECK_EQ(length, backing_store->byte_length());
  }
  return backing_store;
}

void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
  // We need to keep a reference to all backing stores that are inspected
  // in the purging loop below. Otherwise, we might get a deadlock
  // if the temporary backing store reference created in the loop is
  // the last reference. In that case the destructor of the backing store
  // may try to take the &impl()->mutex_ in order to unregister itself.
  std::vector<std::shared_ptr<BackingStore>> prevent_destruction_under_lock;
  base::MutexGuard scope_lock(&impl()->mutex_);
  // Purge all entries in the map that refer to the given isolate.
  for (auto& entry : impl()->map_) {
    auto backing_store = entry.second.lock();
    prevent_destruction_under_lock.emplace_back(backing_store);
    if (!backing_store) continue;  // skip entries where weak ptr is null
    if (!backing_store->is_wasm_memory()) continue;  // skip non-wasm memory
    if (!backing_store->is_shared()) continue;       // skip non-shared memory
    SharedWasmMemoryData* shared_data =
        backing_store->get_shared_wasm_memory_data();
    // Remove this isolate from the isolates list.
    auto& isolates = shared_data->isolates_;
    for (size_t i = 0; i < isolates.size(); i++) {
      if (isolates[i] == isolate) isolates[i] = nullptr;
    }
  }
}

void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
    Isolate* isolate, BackingStore* backing_store,
    Handle<WasmMemoryObject> memory_object) {
  // Add to the weak array list of shared memory objects in the isolate.
  isolate->AddSharedWasmMemory(memory_object);

  // Add the isolate to the list of isolates sharing this backing store.
  base::MutexGuard scope_lock(&impl()->mutex_);
  SharedWasmMemoryData* shared_data =
      backing_store->get_shared_wasm_memory_data();
  auto& isolates = shared_data->isolates_;
  int free_entry = -1;
  for (size_t i = 0; i < isolates.size(); i++) {
    if (isolates[i] == isolate) return;
    if (isolates[i] == nullptr) free_entry = static_cast<int>(i);
  }
  if (free_entry >= 0)
    isolates[free_entry] = isolate;
  else
    isolates.push_back(isolate);
}

void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
    Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
  {
    // The global lock protects the list of isolates per backing store.
    base::MutexGuard scope_lock(&impl()->mutex_);
    SharedWasmMemoryData* shared_data =
        backing_store->get_shared_wasm_memory_data();
    for (Isolate* other : shared_data->isolates_) {
      if (other && other != isolate) {
        other->stack_guard()->RequestGrowSharedMemory();
      }
    }
  }
  // Update memory objects in this isolate.
  UpdateSharedWasmMemoryObjects(isolate);
}

void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(
    Isolate* isolate) {
  HandleScope scope(isolate);
  Handle<WeakArrayList> shared_wasm_memories =
      isolate->factory()->shared_wasm_memories();

  for (int i = 0; i < shared_wasm_memories->length(); i++) {
    HeapObject obj;
    if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue;

    Handle<WasmMemoryObject> memory_object(WasmMemoryObject::cast(obj),
                                           isolate);
    Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
    std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();

    Handle<JSArrayBuffer> new_buffer =
        isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store));
    memory_object->update_instances(isolate, new_buffer);
  }
}

}  // namespace internal
}  // namespace v8

#undef TRACE_BS