1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if !V8_ENABLE_WEBASSEMBLY
6 #error This header should only be included if WebAssembly is enabled.
7 #endif  // !V8_ENABLE_WEBASSEMBLY
8 
9 #ifndef V8_WASM_WASM_CODE_MANAGER_H_
10 #define V8_WASM_WASM_CODE_MANAGER_H_
11 
12 #include <atomic>
13 #include <map>
14 #include <memory>
15 #include <set>
16 #include <utility>
17 #include <vector>
18 
19 #include "src/base/address-region.h"
20 #include "src/base/bit-field.h"
21 #include "src/base/macros.h"
22 #include "src/base/optional.h"
23 #include "src/base/vector.h"
24 #include "src/builtins/builtins.h"
25 #include "src/handles/handles.h"
26 #include "src/tasks/operations-barrier.h"
27 #include "src/trap-handler/trap-handler.h"
28 #include "src/wasm/compilation-environment.h"
29 #include "src/wasm/memory-protection-key.h"
30 #include "src/wasm/wasm-features.h"
31 #include "src/wasm/wasm-limits.h"
32 #include "src/wasm/wasm-module-sourcemap.h"
33 #include "src/wasm/wasm-tier.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 class Code;
39 class CodeDesc;
40 class Isolate;
41 
42 namespace wasm {
43 
44 class DebugInfo;
45 class NativeModule;
46 struct WasmCompilationResult;
47 class WasmEngine;
48 class WasmImportWrapperCache;
49 struct WasmModule;
50 
51 // Convenience macro listing all wasm runtime stubs. Note that the first few
52 // elements of the list coincide with {compiler::TrapId}, so the order matters.
53 #define WASM_RUNTIME_STUB_LIST(V, VTRAP)  \
54   FOREACH_WASM_TRAPREASON(VTRAP)          \
55   V(WasmCompileLazy)                      \
56   V(WasmTriggerTierUp)                    \
57   V(WasmDebugBreak)                       \
58   V(WasmInt32ToHeapNumber)                \
59   V(WasmTaggedNonSmiToInt32)              \
60   V(WasmFloat32ToNumber)                  \
61   V(WasmFloat64ToNumber)                  \
62   V(WasmTaggedToFloat64)                  \
63   V(WasmAllocateJSArray)                  \
64   V(WasmAtomicNotify)                     \
65   V(WasmI32AtomicWait32)                  \
66   V(WasmI32AtomicWait64)                  \
67   V(WasmI64AtomicWait32)                  \
68   V(WasmI64AtomicWait64)                  \
69   V(WasmGetOwnProperty)                   \
70   V(WasmRefFunc)                          \
71   V(WasmMemoryGrow)                       \
72   V(WasmTableInit)                        \
73   V(WasmTableCopy)                        \
74   V(WasmTableFill)                        \
75   V(WasmTableGrow)                        \
76   V(WasmTableGet)                         \
77   V(WasmTableSet)                         \
78   V(WasmStackGuard)                       \
79   V(WasmStackOverflow)                    \
80   V(WasmAllocateFixedArray)               \
81   V(WasmThrow)                            \
82   V(WasmRethrow)                          \
83   V(WasmRethrowExplicitContext)           \
84   V(WasmTraceEnter)                       \
85   V(WasmTraceExit)                        \
86   V(WasmTraceMemory)                      \
87   V(BigIntToI32Pair)                      \
88   V(BigIntToI64)                          \
89   V(CallRefIC)                            \
90   V(DoubleToI)                            \
91   V(I32PairToBigInt)                      \
92   V(I64ToBigInt)                          \
93   V(RecordWriteEmitRememberedSetSaveFP)   \
94   V(RecordWriteOmitRememberedSetSaveFP)   \
95   V(RecordWriteEmitRememberedSetIgnoreFP) \
96   V(RecordWriteOmitRememberedSetIgnoreFP) \
97   V(ToNumber)                             \
98   IF_TSAN(V, TSANRelaxedStore8IgnoreFP)   \
99   IF_TSAN(V, TSANRelaxedStore8SaveFP)     \
100   IF_TSAN(V, TSANRelaxedStore16IgnoreFP)  \
101   IF_TSAN(V, TSANRelaxedStore16SaveFP)    \
102   IF_TSAN(V, TSANRelaxedStore32IgnoreFP)  \
103   IF_TSAN(V, TSANRelaxedStore32SaveFP)    \
104   IF_TSAN(V, TSANRelaxedStore64IgnoreFP)  \
105   IF_TSAN(V, TSANRelaxedStore64SaveFP)    \
106   IF_TSAN(V, TSANSeqCstStore8IgnoreFP)    \
107   IF_TSAN(V, TSANSeqCstStore8SaveFP)      \
108   IF_TSAN(V, TSANSeqCstStore16IgnoreFP)   \
109   IF_TSAN(V, TSANSeqCstStore16SaveFP)     \
110   IF_TSAN(V, TSANSeqCstStore32IgnoreFP)   \
111   IF_TSAN(V, TSANSeqCstStore32SaveFP)     \
112   IF_TSAN(V, TSANSeqCstStore64IgnoreFP)   \
113   IF_TSAN(V, TSANSeqCstStore64SaveFP)     \
114   IF_TSAN(V, TSANRelaxedLoad32IgnoreFP)   \
115   IF_TSAN(V, TSANRelaxedLoad32SaveFP)     \
116   IF_TSAN(V, TSANRelaxedLoad64IgnoreFP)   \
117   IF_TSAN(V, TSANRelaxedLoad64SaveFP)     \
118   V(WasmAllocateArray_Uninitialized)      \
119   V(WasmAllocateArray_InitNull)           \
120   V(WasmAllocateArray_InitZero)           \
121   V(WasmArrayCopy)                        \
122   V(WasmArrayCopyWithChecks)              \
123   V(WasmArrayInitFromData)                \
124   V(WasmAllocateStructWithRtt)            \
125   V(WasmSubtypeCheck)                     \
126   V(WasmOnStackReplace)                   \
127   V(WasmSuspend)
128 
129 // Sorted, disjoint and non-overlapping memory regions. A region is of the
130 // form [start, end). So there's no [start, end), [end, other_end),
131 // because that should have been reduced to [start, other_end).
132 class V8_EXPORT_PRIVATE DisjointAllocationPool final {
133  public:
134   MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(DisjointAllocationPool);
135   explicit DisjointAllocationPool(base::AddressRegion region)
136       : regions_({region}) {}
137 
138   // Merge the parameter region into this object. The assumption is that the
139   // passed region does not intersect this object - for example, it was
140   // obtained from a previous Allocate. Returns the merged region.
141   base::AddressRegion Merge(base::AddressRegion);
142 
143   // Allocate a contiguous region of size {size}. Return an empty region on
144   // failure.
145   base::AddressRegion Allocate(size_t size);
146 
147   // Allocate a contiguous region of size {size} within {region}. Return an
148   // empty pool on failure.
149   base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);
150 
151   bool IsEmpty() const { return regions_.empty(); }
152 
153   const auto& regions() const { return regions_; }
154 
155  private:
156   std::set<base::AddressRegion, base::AddressRegion::StartAddressLess> regions_;
157 };
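
// Rough usage sketch for {DisjointAllocationPool}; {base} stands for the start
// address of some previously reserved code region:
//
//   DisjointAllocationPool pool{base::AddressRegion{base, 4096}};
//   base::AddressRegion a = pool.Allocate(256);  // carve out 256 bytes
//   base::AddressRegion b = pool.Allocate(256);  // carve out another 256 bytes
//   pool.Merge(b);  // give {b} back; adjacent free regions are coalesced
//
// Once the pool runs out of space, {Allocate} returns an empty region.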
158 
159 class V8_EXPORT_PRIVATE WasmCode final {
160  public:
161   enum Kind { kWasmFunction, kWasmToCapiWrapper, kWasmToJsWrapper, kJumpTable };
162 
163   // Each runtime stub is identified by an id. This id is used to reference the
164   // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
165   enum RuntimeStubId {
166 #define DEF_ENUM(Name) k##Name,
167 #define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
168     WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
169 #undef DEF_ENUM_TRAP
170 #undef DEF_ENUM
171         kRuntimeStubCount
172   };
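
  // To illustrate the expansion above: plain {V} entries become enumerators
  // like {kWasmCompileLazy}, while the trap entries generated by
  // {FOREACH_WASM_TRAPREASON} become {kThrowWasm<TrapName>} enumerators, and
  // {kRuntimeStubCount} holds the total number of stubs.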
173 
174   static constexpr RuntimeStubId GetRecordWriteStub(
175       RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
176     switch (remembered_set_action) {
177       case RememberedSetAction::kEmit:
178         switch (fp_mode) {
179           case SaveFPRegsMode::kIgnore:
180             return RuntimeStubId::kRecordWriteEmitRememberedSetIgnoreFP;
181           case SaveFPRegsMode::kSave:
182             return RuntimeStubId::kRecordWriteEmitRememberedSetSaveFP;
183         }
184       case RememberedSetAction::kOmit:
185         switch (fp_mode) {
186           case SaveFPRegsMode::kIgnore:
187             return RuntimeStubId::kRecordWriteOmitRememberedSetIgnoreFP;
188           case SaveFPRegsMode::kSave:
189             return RuntimeStubId::kRecordWriteOmitRememberedSetSaveFP;
190         }
191     }
192   }
193 
194 #ifdef V8_IS_TSAN
195   static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
196                                         std::memory_order order) {
197     if (order == std::memory_order_relaxed) {
198       if (size == kInt8Size) {
199         return fp_mode == SaveFPRegsMode::kIgnore
200                    ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
201                    : RuntimeStubId::kTSANRelaxedStore8SaveFP;
202       } else if (size == kInt16Size) {
203         return fp_mode == SaveFPRegsMode::kIgnore
204                    ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
205                    : RuntimeStubId::kTSANRelaxedStore16SaveFP;
206       } else if (size == kInt32Size) {
207         return fp_mode == SaveFPRegsMode::kIgnore
208                    ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
209                    : RuntimeStubId::kTSANRelaxedStore32SaveFP;
210       } else {
211         CHECK_EQ(size, kInt64Size);
212         return fp_mode == SaveFPRegsMode::kIgnore
213                    ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
214                    : RuntimeStubId::kTSANRelaxedStore64SaveFP;
215       }
216     } else {
217       DCHECK_EQ(order, std::memory_order_seq_cst);
218       if (size == kInt8Size) {
219         return fp_mode == SaveFPRegsMode::kIgnore
220                    ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
221                    : RuntimeStubId::kTSANSeqCstStore8SaveFP;
222       } else if (size == kInt16Size) {
223         return fp_mode == SaveFPRegsMode::kIgnore
224                    ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
225                    : RuntimeStubId::kTSANSeqCstStore16SaveFP;
226       } else if (size == kInt32Size) {
227         return fp_mode == SaveFPRegsMode::kIgnore
228                    ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
229                    : RuntimeStubId::kTSANSeqCstStore32SaveFP;
230       } else {
231         CHECK_EQ(size, kInt64Size);
232         return fp_mode == SaveFPRegsMode::kIgnore
233                    ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
234                    : RuntimeStubId::kTSANSeqCstStore64SaveFP;
235       }
236     }
237   }
238 
239   static RuntimeStubId GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode,
240                                               int size) {
241     if (size == kInt32Size) {
242       return fp_mode == SaveFPRegsMode::kIgnore
243                  ? RuntimeStubId::kTSANRelaxedLoad32IgnoreFP
244                  : RuntimeStubId::kTSANRelaxedLoad32SaveFP;
245     } else {
246       CHECK_EQ(size, kInt64Size);
247       return fp_mode == SaveFPRegsMode::kIgnore
248                  ? RuntimeStubId::kTSANRelaxedLoad64IgnoreFP
249                  : RuntimeStubId::kTSANRelaxedLoad64SaveFP;
250     }
251   }
252 #endif  // V8_IS_TSAN
253 
254   base::Vector<byte> instructions() const {
255     return base::VectorOf(instructions_,
256                           static_cast<size_t>(instructions_size_));
257   }
258   Address instruction_start() const {
259     return reinterpret_cast<Address>(instructions_);
260   }
261   base::Vector<const byte> reloc_info() const {
262     return {protected_instructions_data().end(),
263             static_cast<size_t>(reloc_info_size_)};
264   }
265   base::Vector<const byte> source_positions() const {
266     return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
267   }
268 
269   int index() const { return index_; }
270   // Anonymous functions are functions that don't carry an index.
271   bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
272   Kind kind() const { return KindField::decode(flags_); }
273   NativeModule* native_module() const { return native_module_; }
274   ExecutionTier tier() const { return ExecutionTierField::decode(flags_); }
275   Address constant_pool() const;
276   Address handler_table() const;
277   int handler_table_size() const;
278   Address code_comments() const;
279   int code_comments_size() const;
280   int constant_pool_offset() const { return constant_pool_offset_; }
281   int safepoint_table_offset() const { return safepoint_table_offset_; }
282   int handler_table_offset() const { return handler_table_offset_; }
283   int code_comments_offset() const { return code_comments_offset_; }
284   int unpadded_binary_size() const { return unpadded_binary_size_; }
285   int stack_slots() const { return stack_slots_; }
286   uint16_t first_tagged_parameter_slot() const {
287     return tagged_parameter_slots_ >> 16;
288   }
289   uint16_t num_tagged_parameter_slots() const {
290     return tagged_parameter_slots_ & 0xFFFF;
291   }
292   uint32_t raw_tagged_parameter_slots_for_serialization() const {
293     return tagged_parameter_slots_;
294   }
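
  // For example, if the first tagged stack parameter lives in slot 2 and there
  // are 3 such slots, the packed value is (2 << 16) | 3 == 0x00020003.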
295 
296   bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }
297 
298   bool is_turbofan() const { return tier() == ExecutionTier::kTurbofan; }
299 
300   bool contains(Address pc) const {
301     return reinterpret_cast<Address>(instructions_) <= pc &&
302            pc < reinterpret_cast<Address>(instructions_ + instructions_size_);
303   }
304 
305   // Only Liftoff code that was generated for debugging can be inspected
306   // (otherwise debug side table positions would not match up).
307   bool is_inspectable() const { return is_liftoff() && for_debugging(); }
308 
309   base::Vector<const uint8_t> protected_instructions_data() const {
310     return {meta_data_.get(),
311             static_cast<size_t>(protected_instructions_size_)};
312   }
313 
314   base::Vector<const trap_handler::ProtectedInstructionData>
315   protected_instructions() const {
316     return base::Vector<const trap_handler::ProtectedInstructionData>::cast(
317         protected_instructions_data());
318   }
319 
320   void Validate() const;
321   void Print(const char* name = nullptr) const;
322   void MaybePrint() const;
323   void Disassemble(const char* name, std::ostream& os,
324                    Address current_pc = kNullAddress) const;
325 
326   static bool ShouldBeLogged(Isolate* isolate);
327   void LogCode(Isolate* isolate, const char* source_url, int script_id) const;
328 
329   WasmCode(const WasmCode&) = delete;
330   WasmCode& operator=(const WasmCode&) = delete;
331   ~WasmCode();
332 
333   void IncRef() {
334     int old_val = ref_count_.fetch_add(1, std::memory_order_acq_rel);
335     DCHECK_LE(1, old_val);
336     DCHECK_GT(kMaxInt, old_val);
337     USE(old_val);
338   }
339 
340   // Decrement the ref count. Returns whether this code becomes dead and needs
341   // to be freed.
342   V8_WARN_UNUSED_RESULT bool DecRef() {
343     int old_count = ref_count_.load(std::memory_order_acquire);
344     while (true) {
345       DCHECK_LE(1, old_count);
346       if (V8_UNLIKELY(old_count == 1)) return DecRefOnPotentiallyDeadCode();
347       if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
348                                            std::memory_order_acq_rel)) {
349         return false;
350       }
351     }
352   }
353 
354   // Decrement the ref count on code that is known to be in use (i.e. the ref
355   // count cannot drop to zero here).
356   void DecRefOnLiveCode() {
357     int old_count = ref_count_.fetch_sub(1, std::memory_order_acq_rel);
358     DCHECK_LE(2, old_count);
359     USE(old_count);
360   }
361 
362   // Decrement the ref count on code that is known to be dead, even though there
363   // might still be C++ references. Returns whether this drops the last
364   // reference and the code needs to be freed.
365   V8_WARN_UNUSED_RESULT bool DecRefOnDeadCode() {
366     return ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
367   }
368 
369   // Decrement the ref count on a set of {WasmCode} objects, potentially
370   // belonging to different {NativeModule}s. Dead code will be deleted.
371   static void DecrementRefCount(base::Vector<WasmCode* const>);
372 
373   // Returns the last source position before {offset}.
374   int GetSourcePositionBefore(int offset);
375 
376   // Returns whether this code was generated for debugging. If this returns
377   // {kForDebugging}, but {tier()} is not {kLiftoff}, then Liftoff compilation
378   // bailed out.
379   ForDebugging for_debugging() const {
380     return ForDebuggingField::decode(flags_);
381   }
382 
383   enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
384 
385  private:
386   friend class NativeModule;
387 
388   WasmCode(NativeModule* native_module, int index,
389            base::Vector<byte> instructions, int stack_slots,
390            uint32_t tagged_parameter_slots, int safepoint_table_offset,
391            int handler_table_offset, int constant_pool_offset,
392            int code_comments_offset, int unpadded_binary_size,
393            base::Vector<const byte> protected_instructions_data,
394            base::Vector<const byte> reloc_info,
395            base::Vector<const byte> source_position_table, Kind kind,
396            ExecutionTier tier, ForDebugging for_debugging)
397       : native_module_(native_module),
398         instructions_(instructions.begin()),
399         flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
400                ForDebuggingField::encode(for_debugging)),
401         meta_data_(ConcatenateBytes(
402             {protected_instructions_data, reloc_info, source_position_table})),
403         instructions_size_(instructions.length()),
404         reloc_info_size_(reloc_info.length()),
405         source_positions_size_(source_position_table.length()),
406         protected_instructions_size_(protected_instructions_data.length()),
407         index_(index),
408         constant_pool_offset_(constant_pool_offset),
409         stack_slots_(stack_slots),
410         tagged_parameter_slots_(tagged_parameter_slots),
411         safepoint_table_offset_(safepoint_table_offset),
412         handler_table_offset_(handler_table_offset),
413         code_comments_offset_(code_comments_offset),
414         unpadded_binary_size_(unpadded_binary_size) {
415     DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
416     DCHECK_LE(handler_table_offset, unpadded_binary_size);
417     DCHECK_LE(code_comments_offset, unpadded_binary_size);
418     DCHECK_LE(constant_pool_offset, unpadded_binary_size);
419   }
420 
421   std::unique_ptr<const byte[]> ConcatenateBytes(
422       std::initializer_list<base::Vector<const byte>>);
423 
424   // Tries to get a reasonable name. Lazily looks up the name section, and falls
425   // back to the function index. Return value is guaranteed to not be empty.
426   std::string DebugName() const;
427 
428   // Code objects that have been registered with the global trap handler within
429   // this process will have a {trap_handler_index} associated with them.
430   int trap_handler_index() const {
431     CHECK(has_trap_handler_index());
432     return trap_handler_index_;
433   }
434   void set_trap_handler_index(int value) {
435     CHECK(!has_trap_handler_index());
436     trap_handler_index_ = value;
437   }
438   bool has_trap_handler_index() const { return trap_handler_index_ >= 0; }
439 
440   // Register protected instruction information with the trap handler. Sets
441   // trap_handler_index.
442   void RegisterTrapHandlerData();
443 
444   // Slow path for {DecRef}: The code becomes potentially dead.
445   // Returns whether this code becomes dead and needs to be freed.
446   V8_NOINLINE bool DecRefOnPotentiallyDeadCode();
447 
448   NativeModule* const native_module_ = nullptr;
449   byte* const instructions_;
450   const uint8_t flags_;  // Bit field, see below.
451   // {meta_data_} contains several byte vectors concatenated into one:
452   //  - protected instructions data of size {protected_instructions_size_}
453   //  - relocation info of size {reloc_info_size_}
454   //  - source positions of size {source_positions_size_}
455   // Note that the protected instructions come first to ensure alignment.
456   std::unique_ptr<const byte[]> meta_data_;
457   const int instructions_size_;
458   const int reloc_info_size_;
459   const int source_positions_size_;
460   const int protected_instructions_size_;
461   const int index_;
462   const int constant_pool_offset_;
463   const int stack_slots_;
464   // Number and position of tagged parameters passed to this function via the
465   // stack, packed into a single uint32. These values are used by the stack
466   // walker (e.g. GC) to find references.
467   const uint32_t tagged_parameter_slots_;
468   // We care about safepoint data for wasm-to-js functions, since there may be
469   // stack/register tagged values for large number conversions.
470   const int safepoint_table_offset_;
471   const int handler_table_offset_;
472   const int code_comments_offset_;
473   const int unpadded_binary_size_;
474   int trap_handler_index_ = -1;
475 
476   // Bits encoded in {flags_}:
477   using KindField = base::BitField8<Kind, 0, 3>;
478   using ExecutionTierField = KindField::Next<ExecutionTier, 2>;
479   using ForDebuggingField = ExecutionTierField::Next<ForDebugging, 2>;
480 
481   // WasmCode is ref counted. Counters are held by:
482   //   1) The jump table / code table.
483   //   2) {WasmCodeRefScope}s.
484   //   3) The set of potentially dead code in the {WasmEngine}.
485   // If a decrement of (1) would drop the ref count to 0, that code becomes a
486   // candidate for garbage collection. At that point, we add a ref count for (3)
487   // *before* decrementing the counter to ensure the code stays alive as long as
488   // it's being used. Once the ref count drops to zero (i.e. after being removed
489   // from (3) and all (2)), the code object is deleted and the memory for the
490   // machine code is freed.
491   std::atomic<int> ref_count_{1};
492 };
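
// A minimal sketch of the ref-counting contract described above, assuming some
// raw {WasmCode* code} that is currently kept alive (e.g. by a
// {WasmCodeRefScope} or by the code table):
//
//   code->IncRef();                            // take an additional reference
//   ...                                        // use {code}
//   WasmCode::DecrementRefCount({&code, 1});   // drop it; dead code is freed
//
// {DecRefOnLiveCode} and {DecRefOnDeadCode} are shortcuts for callers that
// already know whether the code object is still in use.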
493 
494 // Check that {WasmCode} objects are sufficiently small. We create many of them,
495 // often for rather small functions.
496 // Increase the limit if needed, but first check if the size increase is
497 // justified.
498 #ifndef V8_GC_MOLE
499 STATIC_ASSERT(sizeof(WasmCode) <= 88);
500 #endif
501 
502 WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
503 
504 // Return a textual description of the kind.
505 const char* GetWasmCodeKindAsString(WasmCode::Kind);
506 
507 // Manages the code reservations and allocations of a single {NativeModule}.
508 class WasmCodeAllocator {
509  public:
510 #if V8_TARGET_ARCH_ARM64
511   // ARM64 only supports direct calls within a 128 MB range.
512   static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
513 #elif V8_TARGET_ARCH_PPC64
514   // Branches only take 26 bits.
515   static constexpr size_t kMaxCodeSpaceSize = 32 * MB;
516 #else
517   // Use 1024 MB limit for code spaces on other platforms. This is smaller than
518   // the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
519   // big reservations, and to ensure that distances within a code space fit
520   // within a 32-bit signed integer.
521   static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
522 #endif
523 
524   explicit WasmCodeAllocator(std::shared_ptr<Counters> async_counters);
525   ~WasmCodeAllocator();
526 
527   // Call before use, after the {NativeModule} is set up completely.
528   void Init(VirtualMemory code_space);
529 
530   size_t committed_code_space() const {
531     return committed_code_space_.load(std::memory_order_acquire);
532   }
533   size_t generated_code_size() const {
534     return generated_code_size_.load(std::memory_order_acquire);
535   }
536   size_t freed_code_size() const {
537     return freed_code_size_.load(std::memory_order_acquire);
538   }
539 
540   // Allocate code space. Returns a valid buffer or fails with OOM (crash).
541   // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
542   base::Vector<byte> AllocateForCode(NativeModule*, size_t size);
543 
544   // Allocate code space within a specific region. Returns a valid buffer or
545   // fails with OOM (crash).
546   // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
547   base::Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
548                                              base::AddressRegion);
549 
550   // Increases or decreases the {writers_count_} field. While there is at least
551   // one writer, it is allowed to call {MakeWritable} to make regions writable.
552   // When the last writer is removed, all code is switched back to
553   // write-protected.
554   // Hold the {NativeModule}'s {allocation_mutex_} when calling one of these
555   // methods. The methods should only be called via {CodeSpaceWriteScope}.
556   V8_EXPORT_PRIVATE void AddWriter();
557   V8_EXPORT_PRIVATE void RemoveWriter();
558 
559   // Make a code region writable. Only allowed if there is at least one writer
560   // (see above).
561   // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
562   V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);
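
  // Sketch of the intended sequence ({allocator} stands for the
  // {WasmCodeAllocator} of the module; this is normally done for the caller by
  // {CodeSpaceWriteScope} while the {NativeModule}'s {allocation_mutex_} is
  // held):
  //
  //   allocator.AddWriter();
  //   allocator.MakeWritable(region);  // unprotect the region to be patched
  //   ...                              // write or patch code in {region}
  //   allocator.RemoveWriter();        // last writer re-protects all code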
563 
564   // Free memory pages of all given code objects. Used for wasm code GC.
565   // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
566   void FreeCode(base::Vector<WasmCode* const>);
567 
568   // Retrieve the number of separately reserved code spaces.
569   // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
570   size_t GetNumCodeSpaces() const;
571 
572   Counters* counters() const { return async_counters_.get(); }
573 
574  private:
575   // Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
576   // restriction on the region to allocate in.
577   static constexpr base::AddressRegion kUnrestrictedRegion{
578       kNullAddress, std::numeric_limits<size_t>::max()};
579 
580   void InsertIntoWritableRegions(base::AddressRegion region,
581                                  bool switch_to_writable);
582 
583   //////////////////////////////////////////////////////////////////////////////
584   // These fields are protected by the mutex in {NativeModule}.
585 
586   // Code space that was reserved and is available for allocations (subset of
587   // {owned_code_space_}).
588   DisjointAllocationPool free_code_space_;
589   // Code space that was allocated for code (subset of {owned_code_space_}).
590   DisjointAllocationPool allocated_code_space_;
591   // Code space that was allocated before but is dead now. Full pages within
592   // this region are discarded. It's still a subset of {owned_code_space_}.
593   DisjointAllocationPool freed_code_space_;
594   std::vector<VirtualMemory> owned_code_space_;
595 
596   // The following two fields are only used if {protect_code_memory_} is true.
597   int writers_count_{0};
598   std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>
599       writable_memory_;
600 
601   // End of fields protected by {mutex_}.
602   //////////////////////////////////////////////////////////////////////////////
603 
604   // {protect_code_memory_} is true if traditional memory permission switching
605   // is used to protect code space. It is false if {MAP_JIT} on Mac or PKU is
606   // being used, or protection is completely disabled.
607   const bool protect_code_memory_;
608   std::atomic<size_t> committed_code_space_{0};
609   std::atomic<size_t> generated_code_size_{0};
610   std::atomic<size_t> freed_code_size_{0};
611 
612   std::shared_ptr<Counters> async_counters_;
613 };
614 
615 class V8_EXPORT_PRIVATE NativeModule final {
616  public:
617 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64 || \
618     V8_TARGET_ARCH_PPC64
619   static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
620 #else
621   static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
622 #endif
623 
624   NativeModule(const NativeModule&) = delete;
625   NativeModule& operator=(const NativeModule&) = delete;
626   ~NativeModule();
627 
628   // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
629   // code below, i.e. it can be called concurrently from background threads.
630   // The returned code still needs to be published via {PublishCode}.
631   std::unique_ptr<WasmCode> AddCode(
632       int index, const CodeDesc& desc, int stack_slots,
633       uint32_t tagged_parameter_slots,
634       base::Vector<const byte> protected_instructions,
635       base::Vector<const byte> source_position_table, WasmCode::Kind kind,
636       ExecutionTier tier, ForDebugging for_debugging);
637 
638   // {PublishCode} makes the code available to the system by entering it into
639   // the code table and patching the jump table. It returns a raw pointer to the
640   // given {WasmCode} object. Ownership is transferred to the {NativeModule}.
641   WasmCode* PublishCode(std::unique_ptr<WasmCode>);
642   std::vector<WasmCode*> PublishCode(base::Vector<std::unique_ptr<WasmCode>>);
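
  // Sketch of the typical two-step flow; the locals ({func_index}, {desc},
  // {tier}, ...) are placeholders for values the caller already has:
  //
  //   std::unique_ptr<WasmCode> code = AddCode(
  //       func_index, desc, stack_slots, tagged_parameter_slots,
  //       protected_instructions, source_positions, WasmCode::kWasmFunction,
  //       tier, for_debugging);
  //   WasmCode* published = PublishCode(std::move(code));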
643 
644   // ReinstallDebugCode does a subset of PublishCode: It installs the code in
645   // the code table and patches the jump table. The given code must be debug
646   // code (with breakpoints) and must be owned by this {NativeModule} already.
647   // This method is used to re-instantiate code that was removed from the code
648   // table and jump table via another {PublishCode}.
649   void ReinstallDebugCode(WasmCode*);
650 
651   struct JumpTablesRef {
652     Address jump_table_start = kNullAddress;
653     Address far_jump_table_start = kNullAddress;
654 
655     bool is_valid() const { return far_jump_table_start != kNullAddress; }
656   };
657 
658   std::pair<base::Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
659       size_t total_code_size);
660 
661   std::unique_ptr<WasmCode> AddDeserializedCode(
662       int index, base::Vector<byte> instructions, int stack_slots,
663       uint32_t tagged_parameter_slots, int safepoint_table_offset,
664       int handler_table_offset, int constant_pool_offset,
665       int code_comments_offset, int unpadded_binary_size,
666       base::Vector<const byte> protected_instructions_data,
667       base::Vector<const byte> reloc_info,
668       base::Vector<const byte> source_position_table, WasmCode::Kind kind,
669       ExecutionTier tier);
670 
671   // Adds anonymous code for testing purposes.
672   WasmCode* AddCodeForTesting(Handle<Code> code);
673 
674   // Use {UseLazyStub} to set up lazy compilation per function. It will use the
675   // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
676   // table with trampolines accordingly.
677   void UseLazyStub(uint32_t func_index);
678 
679   // Creates a snapshot of the current state of the code table. This is useful
680   // to get a consistent view of the table (e.g. used by the serializer).
681   std::vector<WasmCode*> SnapshotCodeTable() const;
682   // Creates a snapshot of all {owned_code_}; this will transfer new code (if
683   // any) to {owned_code_}.
684   std::vector<WasmCode*> SnapshotAllOwnedCode() const;
685 
686   WasmCode* GetCode(uint32_t index) const;
687   bool HasCode(uint32_t index) const;
688   bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const;
689 
690   void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
691   WasmModuleSourceMap* GetWasmSourceMap() const;
692 
693   Address jump_table_start() const {
694     return main_jump_table_ ? main_jump_table_->instruction_start()
695                             : kNullAddress;
696   }
697 
698   uint32_t GetJumpTableOffset(uint32_t func_index) const;
699 
700   // Returns the canonical target to call for the given function (the slot in
701   // the first jump table).
702   Address GetCallTargetForFunction(uint32_t func_index) const;
703 
704   // Finds the jump tables that should be used for a given code region. This
705   // information is then passed to {GetNearCallTargetForFunction} and
706   // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
707   // up there. Return an empty struct if no suitable jump tables exist.
708   JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
709 
710   // Similarly to {GetCallTargetForFunction}, but uses the jump table previously
711   // looked up via {FindJumpTablesForRegionLocked}.
712   Address GetNearCallTargetForFunction(uint32_t func_index,
713                                        const JumpTablesRef&) const;
714 
715   // Get a runtime stub entry (which is a far jump table slot) in the jump table
716   // previously looked up via {FindJumpTablesForRegionLocked}.
717   Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
718                                   const JumpTablesRef&) const;
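
  // Sketch of the lookup flow described above ({code_region} stands for the
  // region the caller is generating code into):
  //
  //   JumpTablesRef tables = FindJumpTablesForRegionLocked(code_region);
  //   if (tables.is_valid()) {
  //     Address call_target = GetNearCallTargetForFunction(func_index, tables);
  //     Address stub_entry =
  //         GetNearRuntimeStubEntry(WasmCode::kWasmStackGuard, tables);
  //   }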
719 
720   // Reverse lookup from a given call target (which must be a jump table slot)
721   // to a function index.
722   uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
723 
724   void AddWriter() {
725     base::RecursiveMutexGuard guard{&allocation_mutex_};
726     code_allocator_.AddWriter();
727   }
728 
729   void RemoveWriter() {
730     base::RecursiveMutexGuard guard{&allocation_mutex_};
731     code_allocator_.RemoveWriter();
732   }
733 
734   void MakeWritable(base::AddressRegion region) {
735     base::RecursiveMutexGuard guard{&allocation_mutex_};
736     code_allocator_.MakeWritable(region);
737   }
738 
739   // For cctests, where we build both WasmModule and the runtime objects
740   // on the fly, and bypass the instance builder pipeline.
741   void ReserveCodeTableForTesting(uint32_t max_functions);
742 
743   void LogWasmCodes(Isolate*, Script);
744 
745   CompilationState* compilation_state() const {
746     return compilation_state_.get();
747   }
748 
749   // Create a {CompilationEnv} object for compilation. The caller has to ensure
750   // that the {WasmModule} pointer stays valid while the {CompilationEnv} is
751   // being used.
752   CompilationEnv CreateCompilationEnv() const;
753 
754   uint32_t num_functions() const {
755     return module_->num_declared_functions + module_->num_imported_functions;
756   }
757   uint32_t num_imported_functions() const {
758     return module_->num_imported_functions;
759   }
760   BoundsCheckStrategy bounds_checks() const { return bounds_checks_; }
761   void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
762   bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
763   base::Vector<const uint8_t> wire_bytes() const {
764     return std::atomic_load(&wire_bytes_)->as_vector();
765   }
766   const WasmModule* module() const { return module_.get(); }
767   std::shared_ptr<const WasmModule> shared_module() const { return module_; }
768   size_t committed_code_space() const {
769     return code_allocator_.committed_code_space();
770   }
771   size_t generated_code_size() const {
772     return code_allocator_.generated_code_size();
773   }
774   size_t liftoff_bailout_count() const { return liftoff_bailout_count_.load(); }
775   size_t liftoff_code_size() const { return liftoff_code_size_.load(); }
776   size_t turbofan_code_size() const { return turbofan_code_size_.load(); }
777   size_t baseline_compilation_cpu_duration() const {
778     return baseline_compilation_cpu_duration_.load();
779   }
780   size_t tier_up_cpu_duration() const { return tier_up_cpu_duration_.load(); }
781 
782   bool HasWireBytes() const {
783     auto wire_bytes = std::atomic_load(&wire_bytes_);
784     return wire_bytes && !wire_bytes->empty();
785   }
786   void SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes);
787 
788   void UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier);
789   void AddLiftoffBailout() {
790     liftoff_bailout_count_.fetch_add(1, std::memory_order_relaxed);
791   }
792 
793   WasmCode* Lookup(Address) const;
794 
795   WasmImportWrapperCache* import_wrapper_cache() const {
796     return import_wrapper_cache_.get();
797   }
798 
799   const WasmFeatures& enabled_features() const { return enabled_features_; }
800 
801   // Returns the runtime stub id that corresponds to the given address (which
802   // must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
803   WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;
804 
805   // Sample the current code size of this module to the given counters.
806   enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
807   void SampleCodeSize(Counters*, CodeSamplingTime) const;
808 
809   V8_WARN_UNUSED_RESULT std::unique_ptr<WasmCode> AddCompiledCode(
810       WasmCompilationResult);
811   V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
812       base::Vector<WasmCompilationResult>);
813 
814   // Set a new tiering state, but don't trigger any recompilation yet; use
815   // {RecompileForTiering} for that. The two steps are split because in some
816   // scenarios we need to drop locks before triggering recompilation.
817   void SetTieringState(TieringState);
818 
819   // Check whether this module is tiered down for debugging.
820   bool IsTieredDown();
821 
822   // Fully recompile this module in the tier set previously via
823   // {SetTieringState}. The calling thread contributes to compilation and only
824   // returns once recompilation is done.
825   void RecompileForTiering();
826 
827   // Find all functions that need to be recompiled for a new tier. Note that
828   // compilation jobs might run concurrently, so this method only considers the
829   // compilation state of this native module at the time of the call.
830   // Returns a vector of function indexes to recompile.
831   std::vector<int> FindFunctionsToRecompile(TieringState);
832 
833   // Free a set of functions of this module. Uncommits whole pages if possible.
834   // The given vector must be ordered by the instruction start address, and all
835   // {WasmCode} objects must not be used any more.
836   // Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
837   // its accounting.
838   void FreeCode(base::Vector<WasmCode* const>);
839 
840   // Retrieve the number of separately reserved code spaces for this module.
841   size_t GetNumberOfCodeSpacesForTesting() const;
842 
843   // Check whether there is DebugInfo for this NativeModule.
844   bool HasDebugInfo() const;
845 
846   // Get or create the debug info for this NativeModule.
847   DebugInfo* GetDebugInfo();
848 
849   uint32_t* tiering_budget_array() { return tiering_budgets_.get(); }
850 
851   Counters* counters() const { return code_allocator_.counters(); }
852 
853  private:
854   friend class WasmCode;
855   friend class WasmCodeAllocator;
856   friend class WasmCodeManager;
857   friend class CodeSpaceWriteScope;
858 
859   struct CodeSpaceData {
860     base::AddressRegion region;
861     WasmCode* jump_table;
862     WasmCode* far_jump_table;
863   };
864 
865   // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
866   NativeModule(const WasmFeatures& enabled_features,
867                DynamicTiering dynamic_tiering, VirtualMemory code_space,
868                std::shared_ptr<const WasmModule> module,
869                std::shared_ptr<Counters> async_counters,
870                std::shared_ptr<NativeModule>* shared_this);
871 
872   std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
873       int index, const CodeDesc& desc, int stack_slots,
874       uint32_t tagged_parameter_slots,
875       base::Vector<const byte> protected_instructions_data,
876       base::Vector<const byte> source_position_table, WasmCode::Kind kind,
877       ExecutionTier tier, ForDebugging for_debugging,
878       base::Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
879 
880   WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
881                                                base::AddressRegion);
882 
883   void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);
884 
885   // Hold the {allocation_mutex_} when calling one of these methods.
886   // {slot_index} is the index in the declared functions, i.e. function index
887   // minus the number of imported functions.
888   void PatchJumpTablesLocked(uint32_t slot_index, Address target);
889   void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index,
890                             Address target);
891 
892   // Called by the {WasmCodeAllocator} to register a new code space.
893   void AddCodeSpaceLocked(base::AddressRegion);
894 
895   // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
896   WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
897 
898   // Transfer owned code from {new_owned_code_} to {owned_code_}.
899   void TransferNewOwnedCodeLocked() const;
900 
901   // Add code to the code cache, if it meets criteria for being cached and we do
902   // not have code in the cache yet.
903   void InsertToCodeCache(WasmCode* code);
904 
905   // -- Fields of {NativeModule} start here.
906 
907   // Keep the engine alive as long as this NativeModule is alive. In its
908   // destructor, the NativeModule still communicates with the WasmCodeManager,
909   // owned by the engine. This field comes before other fields that also still
910   // access the engine (like the code allocator), so that its destructor runs
911   // last.
912   OperationsBarrier::Token engine_scope_;
913 
914   // {WasmCodeAllocator} manages all code reservations and allocations for this
915   // {NativeModule}.
916   WasmCodeAllocator code_allocator_;
917 
918   // Features enabled for this module. We keep a copy of the features that
919   // were enabled at the time of the creation of this native module,
920   // to be consistent across asynchronous compilations later.
921   const WasmFeatures enabled_features_;
922 
923   // The decoded module, stored in a shared_ptr such that background compile
924   // tasks can keep this alive.
925   std::shared_ptr<const WasmModule> module_;
926 
927   std::unique_ptr<WasmModuleSourceMap> source_map_;
928 
929   // Wire bytes, held in a shared_ptr so they can be kept alive by the
930   // {WireBytesStorage}, held by background compile tasks.
931   std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
932 
933   // The first allocated jump table. Always used by external calls (from JS).
934   // Wasm calls might use one of the other jump tables stored in
935   // {code_space_data_}.
936   WasmCode* main_jump_table_ = nullptr;
937 
938   // The first allocated far jump table.
939   WasmCode* main_far_jump_table_ = nullptr;
940 
941   // Lazy compile stub table, containing entries to jump to the
942   // {WasmCompileLazy} builtin, passing the function index.
943   WasmCode* lazy_compile_table_ = nullptr;
944 
945   // The compilation state keeps track of compilation tasks for this module.
946   // Note that its destructor blocks until all tasks are finished/aborted and
947   // hence needs to be destructed first when this native module dies.
948   std::unique_ptr<CompilationState> compilation_state_;
949 
950   // A cache of the import wrappers, keyed on the kind and signature.
951   std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;
952 
953   // Array of per-function call-count budgets used for dynamic tiering.
954   std::unique_ptr<uint32_t[]> tiering_budgets_;
955 
956   // This mutex protects concurrent calls to {AddCode} and friends.
957   // TODO(dlehmann): Revert this to a regular {Mutex} again.
958   // This needs to be a {RecursiveMutex} only because of {CodeSpaceWriteScope}
959   // usages, which either (1) occur at places that already hold the
960   // {allocation_mutex_} or (2) open multiple nested {CodeSpaceWriteScope}s in
961   // the call hierarchy. Both are fixable.
962   mutable base::RecursiveMutex allocation_mutex_;
963 
964   //////////////////////////////////////////////////////////////////////////////
965   // Protected by {allocation_mutex_}:
966 
967   // Holds allocated code objects for fast lookup and deletion. For lookup based
968   // on pc, the key is the instruction start address of the value. Filled lazily
969   // from {new_owned_code_} (below).
970   mutable std::map<Address, std::unique_ptr<WasmCode>> owned_code_;
971 
972   // Holds owned code which is not inserted into {owned_code_} yet. It will be
973   // inserted on demand. This has much better performance than inserting
974   // individual code objects.
975   mutable std::vector<std::unique_ptr<WasmCode>> new_owned_code_;
976 
977   // Table of the latest code object per function, updated on initial
978   // compilation and tier up. The number of entries is
979   // {WasmModule::num_declared_functions}, i.e. there are no entries for
980   // imported functions.
981   std::unique_ptr<WasmCode*[]> code_table_;
982 
983   // Data (especially jump table) per code space.
984   std::vector<CodeSpaceData> code_space_data_;
985 
986   // Debug information for this module. You only need to hold the allocation
987   // mutex while getting the {DebugInfo} pointer, or initializing this field.
988   // Further accesses to the {DebugInfo} do not need to be protected by the
989   // mutex.
990   std::unique_ptr<DebugInfo> debug_info_;
991 
992   TieringState tiering_state_ = kTieredUp;
993 
994   // Cache both baseline and top-tier code if we are debugging, to speed up
995   // repeated enabling/disabling of the debugger or profiler.
996   // Maps <tier, function_index> to WasmCode.
997   std::unique_ptr<std::map<std::pair<ExecutionTier, int>, WasmCode*>>
998       cached_code_;
999 
1000   // End of fields protected by {allocation_mutex_}.
1001   //////////////////////////////////////////////////////////////////////////////
1002 
1003   const BoundsCheckStrategy bounds_checks_;
1004   bool lazy_compile_frozen_ = false;
1005   std::atomic<size_t> liftoff_bailout_count_{0};
1006   std::atomic<size_t> liftoff_code_size_{0};
1007   std::atomic<size_t> turbofan_code_size_{0};
1008   std::atomic<size_t> baseline_compilation_cpu_duration_{0};
1009   std::atomic<size_t> tier_up_cpu_duration_{0};
1010 };
1011 
1012 class V8_EXPORT_PRIVATE WasmCodeManager final {
1013  public:
1014   WasmCodeManager();
1015   WasmCodeManager(const WasmCodeManager&) = delete;
1016   WasmCodeManager& operator=(const WasmCodeManager&) = delete;
1017 
1018   ~WasmCodeManager();
1019 
1020 #if defined(V8_OS_WIN64)
1021   static bool CanRegisterUnwindInfoForNonABICompliantCodeRange();
1022 #endif  // V8_OS_WIN64
1023 
1024   NativeModule* LookupNativeModule(Address pc) const;
1025   WasmCode* LookupCode(Address pc) const;
1026   size_t committed_code_space() const {
1027     return total_committed_code_space_.load();
1028   }
1029 
1030   // Estimate the needed code space for a Liftoff function based on the size of
1031   // the function body (wasm byte code).
1032   static size_t EstimateLiftoffCodeSize(int body_size);
1033   // Estimate the needed code space from a completely decoded module.
1034   static size_t EstimateNativeModuleCodeSize(const WasmModule* module,
1035                                              bool include_liftoff,
1036                                              DynamicTiering dynamic_tiering);
1037   // Estimate the needed code space from the number of functions and total code
1038   // section length.
1039   static size_t EstimateNativeModuleCodeSize(int num_functions,
1040                                              int num_imported_functions,
1041                                              int code_section_length,
1042                                              bool include_liftoff,
1043                                              DynamicTiering dynamic_tiering);
1044   // Estimate the size of metadata needed for the NativeModule, excluding
1045   // generated code. This data will still be stored on the C++ heap.
1046   static size_t EstimateNativeModuleMetaDataSize(const WasmModule* module);
1047 
1048   // Set this thread's permission of all owned code space to read-write or
1049   // read-only (if {writable} is false). Can only be called if
1050   // {HasMemoryProtectionKeySupport()} is {true}.
1051   // Since the permission is thread-local, there is no requirement to hold any
1052   // lock when calling this method.
1053   void SetThreadWritable(bool writable);
1054 
1055   // Returns true if there is hardware support for PKU. Use
1056   // {MemoryProtectionKeysEnabled} to also check if PKU usage is enabled via
1057   // flags.
1058   bool HasMemoryProtectionKeySupport() const;
1059 
1060   // Returns true if PKU should be used.
1061   bool MemoryProtectionKeysEnabled() const;
1062 
1063   // Returns {true} if the memory protection key is write-enabled for the
1064   // current thread.
1065   // Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
1066   bool MemoryProtectionKeyWritable() const;
1067 
1068   // Initialize the current thread's permissions for the memory protection key,
1069   // if we have support.
1070   void InitializeMemoryProtectionKeyPermissionsIfSupported() const;
1071 
1072  private:
1073   friend class WasmCodeAllocator;
1074   friend class WasmEngine;
1075 
1076   std::shared_ptr<NativeModule> NewNativeModule(
1077       Isolate* isolate, const WasmFeatures& enabled_features,
1078       size_t code_size_estimate, std::shared_ptr<const WasmModule> module);
1079 
1080   V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
1081                                                   void* hint = nullptr);
1082   void Commit(base::AddressRegion);
1083   void Decommit(base::AddressRegion);
1084 
1085   void FreeNativeModule(base::Vector<VirtualMemory> owned_code,
1086                         size_t committed_size);
1087 
1088   void AssignRange(base::AddressRegion, NativeModule*);
1089 
1090   const size_t max_committed_code_space_;
1091 
1092   std::atomic<size_t> total_committed_code_space_{0};
1093   // If the committed code space exceeds {critical_committed_code_space_}, then
1094   // we trigger a GC before creating the next module. This value is set to the
1095   // currently committed space plus 50% of the available code space on creation
1096   // and updated after each GC.
1097   std::atomic<size_t> critical_committed_code_space_;
1098 
1099   int memory_protection_key_;
1100 
1101   mutable base::Mutex native_modules_mutex_;
1102 
1103   //////////////////////////////////////////////////////////////////////////////
1104   // Protected by {native_modules_mutex_}:
1105 
1106   std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
1107 
1108   // End of fields protected by {native_modules_mutex_}.
1109   //////////////////////////////////////////////////////////////////////////////
1110 };
1111 
1112 // {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
1113 // by e.g. creating new code or looking up code by its address are added to the
1114 // top-most {WasmCodeRefScope}.
1115 class V8_EXPORT_PRIVATE V8_NODISCARD WasmCodeRefScope {
1116  public:
1117   WasmCodeRefScope();
1118   WasmCodeRefScope(const WasmCodeRefScope&) = delete;
1119   WasmCodeRefScope& operator=(const WasmCodeRefScope&) = delete;
1120   ~WasmCodeRefScope();
1121 
1122   // Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
1123   // there is no current scope.
1124   static void AddRef(WasmCode*);
1125 
1126  private:
1127   WasmCodeRefScope* const previous_scope_;
1128   std::vector<WasmCode*> code_ptrs_;
1129 };
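
// Rough usage pattern: stack-allocate a {WasmCodeRefScope} around any code
// that obtains raw {WasmCode} pointers, so that those pointers stay valid
// until the scope is destroyed:
//
//   {
//     WasmCodeRefScope code_ref_scope;
//     WasmCode* code = native_module->Lookup(pc);  // ref is held by the scope
//     ...  // {code} can be used safely here
//   }  // references collected by the scope are dropped again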
1130 
1131 // Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
1132 // ref-counted pointer to a {WasmCode} object.
1133 class GlobalWasmCodeRef {
1134  public:
1135   explicit GlobalWasmCodeRef(WasmCode* code,
1136                              std::shared_ptr<NativeModule> native_module)
1137       : code_(code), native_module_(std::move(native_module)) {
1138     code_->IncRef();
1139   }
1140 
1141   GlobalWasmCodeRef(const GlobalWasmCodeRef&) = delete;
1142   GlobalWasmCodeRef& operator=(const GlobalWasmCodeRef&) = delete;
1143 
1144   ~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }
1145 
1146   // Get a pointer to the contained {WasmCode} object. This is only guaranteed
1147   // to exist as long as this {GlobalWasmCodeRef} exists.
1148   WasmCode* code() const { return code_; }
1149 
1150  private:
1151   WasmCode* const code_;
1152   // Also keep the {NativeModule} alive.
1153   const std::shared_ptr<NativeModule> native_module_;
1154 };
1155 
1156 Builtin RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId);
1157 const char* GetRuntimeStubName(WasmCode::RuntimeStubId);
1158 
1159 }  // namespace wasm
1160 }  // namespace internal
1161 }  // namespace v8
1162 
1163 #endif  // V8_WASM_WASM_CODE_MANAGER_H_
1164