// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_

#include <atomic>
#include <map>
#include <memory>
#include <set>
#include <unordered_set>
#include <utility>
#include <vector>

#include "src/base/address-region.h"
#include "src/base/bit-field.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-tier.h"

namespace v8 {
namespace internal {

class Code;
class CodeDesc;
class Isolate;

namespace wasm {

class DebugInfo;
class NativeModule;
class WasmCodeManager;
struct WasmCompilationResult;
class WasmEngine;
class WasmImportWrapperCache;
struct WasmModule;

// Convenience macro listing all wasm runtime stubs. Note that the first few
// elements of the list coincide with {compiler::TrapId}; order matters.
#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
  FOREACH_WASM_TRAPREASON(VTRAP)         \
  V(WasmCompileLazy)                     \
  V(WasmTriggerTierUp)                   \
  V(WasmDebugBreak)                      \
  V(WasmInt32ToHeapNumber)               \
  V(WasmTaggedNonSmiToInt32)             \
  V(WasmFloat32ToNumber)                 \
  V(WasmFloat64ToNumber)                 \
  V(WasmTaggedToFloat64)                 \
  V(WasmAllocateJSArray)                 \
  V(WasmAllocatePair)                    \
  V(WasmAtomicNotify)                    \
  V(WasmI32AtomicWait32)                 \
  V(WasmI32AtomicWait64)                 \
  V(WasmI64AtomicWait32)                 \
  V(WasmI64AtomicWait64)                 \
  V(WasmRefFunc)                         \
  V(WasmMemoryGrow)                      \
  V(WasmTableInit)                       \
  V(WasmTableCopy)                       \
  V(WasmTableGet)                        \
  V(WasmTableSet)                        \
  V(WasmStackGuard)                      \
  V(WasmStackOverflow)                   \
  V(WasmThrow)                           \
  V(WasmRethrow)                         \
  V(WasmTraceEnter)                      \
  V(WasmTraceExit)                       \
  V(WasmTraceMemory)                     \
  V(ArgumentsAdaptorTrampoline)          \
  V(BigIntToI32Pair)                     \
  V(BigIntToI64)                         \
  V(DoubleToI)                           \
  V(I32PairToBigInt)                     \
  V(I64ToBigInt)                         \
  V(RecordWrite)                         \
  V(ToNumber)

// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
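// Illustrative example (not part of the interface): merging [0x1000, 0x2000)
// into a pool that holds [0x2000, 0x3000) collapses both into the single
// region [0x1000, 0x3000); a subsequent {Allocate(0x800)} then carves a
// 0x800-byte region out of that range and leaves the remainder in the pool.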
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
 public:
  MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(DisjointAllocationPool);
  explicit DisjointAllocationPool(base::AddressRegion region)
      : regions_({region}) {}

  // Merge the parameter region into this object. The assumption is that the
  // passed parameter does not intersect this object - for example, it was
  // obtained from a previous Allocate. Returns the merged region.
  base::AddressRegion Merge(base::AddressRegion);

  // Allocate a contiguous region of size {size}. Return an empty region on
  // failure.
  base::AddressRegion Allocate(size_t size);

  // Allocate a contiguous region of size {size} within {region}. Return an
  // empty region on failure.
  base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);

  bool IsEmpty() const { return regions_.empty(); }

  const auto& regions() const { return regions_; }

 private:
  std::set<base::AddressRegion, base::AddressRegion::StartAddressLess> regions_;
};

class V8_EXPORT_PRIVATE WasmCode final {
 public:
  enum Kind {
    kFunction,
    kWasmToCapiWrapper,
    kWasmToJsWrapper,
    kJumpTable
  };

  // Each runtime stub is identified by an id. This id is used to reference the
  // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
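  // For example, {DEF_ENUM(WasmCompileLazy)} below expands to
  // {kWasmCompileLazy}, and {DEF_ENUM_TRAP(Foo)} would expand to
  // {kThrowWasmFoo}, giving every trap reason its own stub id.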
  enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
    WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
        kRuntimeStubCount
  };

  Vector<byte> instructions() const {
    return VectorOf(instructions_, static_cast<size_t>(instructions_size_));
  }
  Address instruction_start() const {
    return reinterpret_cast<Address>(instructions_);
  }
  Vector<const byte> reloc_info() const {
    return {protected_instructions_data().end(),
            static_cast<size_t>(reloc_info_size_)};
  }
  Vector<const byte> source_positions() const {
    return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
  }

  // TODO(clemensb): Make this return int.
  uint32_t index() const {
    DCHECK_LE(0, index_);
    return index_;
  }
  // Anonymous functions are functions that don't carry an index.
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  Kind kind() const { return KindField::decode(flags_); }
  NativeModule* native_module() const { return native_module_; }
  ExecutionTier tier() const { return ExecutionTierField::decode(flags_); }
  Address constant_pool() const;
  Address handler_table() const;
  int handler_table_size() const;
  Address code_comments() const;
  int code_comments_size() const;
  int constant_pool_offset() const { return constant_pool_offset_; }
  int safepoint_table_offset() const { return safepoint_table_offset_; }
  int handler_table_offset() const { return handler_table_offset_; }
  int code_comments_offset() const { return code_comments_offset_; }
  int unpadded_binary_size() const { return unpadded_binary_size_; }
  int stack_slots() const { return stack_slots_; }
  int tagged_parameter_slots() const { return tagged_parameter_slots_; }
  bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }
  bool contains(Address pc) const {
    return reinterpret_cast<Address>(instructions_) <= pc &&
           pc < reinterpret_cast<Address>(instructions_ + instructions_size_);
  }

  // Only Liftoff code that was generated for debugging can be inspected
  // (otherwise debug side table positions would not match up).
  bool is_inspectable() const { return is_liftoff() && for_debugging(); }

  Vector<const uint8_t> protected_instructions_data() const {
    return {meta_data_.get(),
            static_cast<size_t>(protected_instructions_size_)};
  }

  Vector<const trap_handler::ProtectedInstructionData> protected_instructions()
      const {
    return Vector<const trap_handler::ProtectedInstructionData>::cast(
        protected_instructions_data());
  }

  void Validate() const;
  void Print(const char* name = nullptr) const;
  void MaybePrint(const char* name = nullptr) const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress) const;

  static bool ShouldBeLogged(Isolate* isolate);
  void LogCode(Isolate* isolate) const;

  WasmCode(const WasmCode&) = delete;
  WasmCode& operator=(const WasmCode&) = delete;
  ~WasmCode();

  void IncRef() {
    int old_val = ref_count_.fetch_add(1, std::memory_order_acq_rel);
    DCHECK_LE(1, old_val);
    DCHECK_GT(kMaxInt, old_val);
    USE(old_val);
  }

  // Decrement the ref count. Returns whether this code becomes dead and needs
  // to be freed.
  V8_WARN_UNUSED_RESULT bool DecRef() {
    int old_count = ref_count_.load(std::memory_order_acquire);
    while (true) {
      DCHECK_LE(1, old_count);
      if (V8_UNLIKELY(old_count == 1)) return DecRefOnPotentiallyDeadCode();
      if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
                                           std::memory_order_acq_rel)) {
        return false;
      }
    }
  }

  // Decrement the ref count on code that is known to be dead, even though there
  // might still be C++ references. Returns whether this drops the last
  // reference and the code needs to be freed.
  V8_WARN_UNUSED_RESULT bool DecRefOnDeadCode() {
    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }

  // Decrement the ref count on a set of {WasmCode} objects, potentially
  // belonging to different {NativeModule}s. Dead code will be deleted.
  static void DecrementRefCount(Vector<WasmCode* const>);

  // Returns the last source position before {offset}.
  int GetSourcePositionBefore(int offset);

  // Returns whether this code was generated for debugging. If this returns
  // {kForDebugging}, but {tier()} is not {kLiftoff}, then Liftoff compilation
  // bailed out.
  ForDebugging for_debugging() const {
    return ForDebuggingField::decode(flags_);
  }

  enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };

 private:
  friend class NativeModule;

  WasmCode(NativeModule* native_module, int index, Vector<byte> instructions,
           int stack_slots, int tagged_parameter_slots,
           int safepoint_table_offset, int handler_table_offset,
           int constant_pool_offset, int code_comments_offset,
           int unpadded_binary_size,
           Vector<const byte> protected_instructions_data,
           Vector<const byte> reloc_info,
           Vector<const byte> source_position_table, Kind kind,
           ExecutionTier tier, ForDebugging for_debugging)
      : native_module_(native_module),
        instructions_(instructions.begin()),
        flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
               ForDebuggingField::encode(for_debugging)),
        meta_data_(ConcatenateBytes(
            {protected_instructions_data, reloc_info, source_position_table})),
        instructions_size_(instructions.length()),
        reloc_info_size_(reloc_info.length()),
        source_positions_size_(source_position_table.length()),
        protected_instructions_size_(protected_instructions_data.length()),
        index_(index),
        constant_pool_offset_(constant_pool_offset),
        stack_slots_(stack_slots),
        tagged_parameter_slots_(tagged_parameter_slots),
        safepoint_table_offset_(safepoint_table_offset),
        handler_table_offset_(handler_table_offset),
        code_comments_offset_(code_comments_offset),
        unpadded_binary_size_(unpadded_binary_size) {
    DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
    DCHECK_LE(handler_table_offset, unpadded_binary_size);
    DCHECK_LE(code_comments_offset, unpadded_binary_size);
    DCHECK_LE(constant_pool_offset, unpadded_binary_size);
  }

  std::unique_ptr<const byte[]> ConcatenateBytes(
      std::initializer_list<Vector<const byte>>);

  // Code objects that have been registered with the global trap handler within
  // this process will have a {trap_handler_index} associated with them.
  int trap_handler_index() const {
    CHECK(has_trap_handler_index());
    return trap_handler_index_;
  }
  void set_trap_handler_index(int value) {
    CHECK(!has_trap_handler_index());
    trap_handler_index_ = value;
  }
  bool has_trap_handler_index() const { return trap_handler_index_ >= 0; }

  // Register protected instruction information with the trap handler. Sets
  // trap_handler_index.
  void RegisterTrapHandlerData();

  // Slow path for {DecRef}: The code becomes potentially dead.
  // Returns whether this code becomes dead and needs to be freed.
  V8_NOINLINE bool DecRefOnPotentiallyDeadCode();

  NativeModule* const native_module_ = nullptr;
  byte* const instructions_;
  const uint8_t flags_;  // Bit field, see below.
  // {meta_data_} contains several byte vectors concatenated into one:
  //  - protected instructions data of size {protected_instructions_size_}
  //  - relocation info of size {reloc_info_size_}
  //  - source positions of size {source_positions_size_}
  // Note that the protected instructions come first to ensure alignment.
  std::unique_ptr<const byte[]> meta_data_;
  const int instructions_size_;
  const int reloc_info_size_;
  const int source_positions_size_;
  const int protected_instructions_size_;
  const int index_;
  const int constant_pool_offset_;
  const int stack_slots_;
  // Number of tagged parameters passed to this function via the stack. This
  // value is used by the stack walker (e.g. GC) to find references.
  const int tagged_parameter_slots_;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
  const int safepoint_table_offset_;
  const int handler_table_offset_;
  const int code_comments_offset_;
  const int unpadded_binary_size_;
  int trap_handler_index_ = -1;

  // Bits encoded in {flags_}:
  using KindField = base::BitField8<Kind, 0, 3>;
  using ExecutionTierField = KindField::Next<ExecutionTier, 2>;
  using ForDebuggingField = ExecutionTierField::Next<ForDebugging, 2>;
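  // Together these fields occupy bits 0-6 (3 + 2 + 2) of the 8-bit {flags_},
  // so kind, tier and debugging state are all encoded in a single byte.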

  // WasmCode is ref counted. Counters are held by:
  //   1) The jump table / code table.
  //   2) {WasmCodeRefScope}s.
  //   3) The set of potentially dead code in the {WasmEngine}.
  // If a decrement of (1) would drop the ref count to 0, that code becomes a
  // candidate for garbage collection. At that point, we add a ref count for (3)
  // *before* decrementing the counter to ensure the code stays alive as long as
  // it's being used. Once the ref count drops to zero (i.e. after being removed
  // from (3) and all (2)), the code object is deleted and the memory for the
  // machine code is freed.
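  // Example: when tier-up replaces a function's entry in the code table,
  // ref (1) on the old code is dropped; if that would bring the count to
  // zero, a ref for (3) is taken first, so the old code is only freed once
  // the engine's GC and any remaining {WasmCodeRefScope}s release it.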
  std::atomic<int> ref_count_{1};
};

// Check that {WasmCode} objects are sufficiently small. We create many of them,
// often for rather small functions.
// Increase the limit if needed, but first check if the size increase is
// justified.
STATIC_ASSERT(sizeof(WasmCode) <= 88);

WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);

// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);

// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
 public:
#if V8_TARGET_ARCH_ARM64
  // ARM64 only supports direct calls within a 128 MB range.
  static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
#else
  // Use 1024 MB limit for code spaces on other platforms. This is smaller than
  // the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
  // big reservations, and to ensure that distances within a code space fit
  // within a 32-bit signed integer.
  static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif

  // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
  // indicate that the lock on the {WasmCodeAllocator} is already taken. It's
  // optional so that methods can also be called without holding the lock.
  class OptionalLock {
   public:
    // External users can only instantiate a non-locked {OptionalLock}.
    OptionalLock() = default;
    ~OptionalLock();
    bool is_locked() const { return allocator_ != nullptr; }

   private:
    friend class WasmCodeAllocator;
    // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
    // is passed.
    void Lock(WasmCodeAllocator*);

    WasmCodeAllocator* allocator_ = nullptr;
  };

  WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
                    std::shared_ptr<Counters> async_counters);
  ~WasmCodeAllocator();

  // Call before use, after the {NativeModule} is set up completely.
  void Init(NativeModule*);

  size_t committed_code_space() const {
    return committed_code_space_.load(std::memory_order_acquire);
  }
  size_t generated_code_size() const {
    return generated_code_size_.load(std::memory_order_acquire);
  }
  size_t freed_code_size() const {
    return freed_code_size_.load(std::memory_order_acquire);
  }

  // Allocate code space. Returns a valid buffer or fails with OOM (crash).
  Vector<byte> AllocateForCode(NativeModule*, size_t size);

  // Allocate code space within a specific region. Returns a valid buffer or
  // fails with OOM (crash).
  Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
                                       base::AddressRegion,
                                       const WasmCodeAllocator::OptionalLock&);

  // Sets permissions of all owned code space to executable, or read-write (if
  // {executable} is false). Returns true on success.
  V8_EXPORT_PRIVATE bool SetExecutable(bool executable);

  // Free memory pages of all given code objects. Used for wasm code GC.
  void FreeCode(Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces.
  size_t GetNumCodeSpaces() const;

 private:
  // Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
  // restriction on the region to allocate in.
  static constexpr base::AddressRegion kUnrestrictedRegion{
      kNullAddress, std::numeric_limits<size_t>::max()};

  // The engine-wide wasm code manager.
  WasmCodeManager* const code_manager_;

  mutable base::Mutex mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {mutex_}:

  // Code space that was reserved and is available for allocations (subset of
  // {owned_code_space_}).
  DisjointAllocationPool free_code_space_;
  // Code space that was allocated for code (subset of {owned_code_space_}).
  DisjointAllocationPool allocated_code_space_;
  // Code space that was allocated before but is dead now. Full pages within
  // this region are discarded. It's still a subset of {owned_code_space_}.
  DisjointAllocationPool freed_code_space_;
  std::vector<VirtualMemory> owned_code_space_;

  // End of fields protected by {mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  std::atomic<size_t> committed_code_space_{0};
  std::atomic<size_t> generated_code_size_{0};
  std::atomic<size_t> freed_code_size_{0};

  bool is_executable_ = false;

  std::shared_ptr<Counters> async_counters_;
};

class V8_EXPORT_PRIVATE NativeModule final {
 public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
#else
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif

  NativeModule(const NativeModule&) = delete;
  NativeModule& operator=(const NativeModule&) = delete;
  ~NativeModule();

  // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
  // code below, i.e. it can be called concurrently from background threads.
  // The returned code still needs to be published via {PublishCode}.
  std::unique_ptr<WasmCode> AddCode(int index, const CodeDesc& desc,
                                    int stack_slots, int tagged_parameter_slots,
                                    Vector<const byte> protected_instructions,
                                    Vector<const byte> source_position_table,
                                    WasmCode::Kind kind, ExecutionTier tier,
                                    ForDebugging for_debugging);

  // {PublishCode} makes the code available to the system by entering it into
  // the code table and patching the jump table. It returns a raw pointer to the
  // given {WasmCode} object. Ownership is transferred to the {NativeModule}.
  WasmCode* PublishCode(std::unique_ptr<WasmCode>);
  std::vector<WasmCode*> PublishCode(Vector<std::unique_ptr<WasmCode>>);
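  // Typical usage (sketch): a compile task creates the code, and a second step
  // publishes it, e.g.
  //   std::unique_ptr<WasmCode> code = native_module->AddCode(...);
  //   WasmCode* published = native_module->PublishCode(std::move(code));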

  WasmCode* AddDeserializedCode(
      int index, Vector<const byte> instructions, int stack_slots,
      int tagged_parameter_slots, int safepoint_table_offset,
      int handler_table_offset, int constant_pool_offset,
      int code_comments_offset, int unpadded_binary_size,
      Vector<const byte> protected_instructions_data,
      Vector<const byte> reloc_info, Vector<const byte> source_position_table,
      WasmCode::Kind kind, ExecutionTier tier);

  // Adds anonymous code for testing purposes.
  WasmCode* AddCodeForTesting(Handle<Code> code);

  // Use {UseLazyStub} to set up lazy compilation per function. It will use the
  // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
  // table with trampolines accordingly.
  void UseLazyStub(uint32_t func_index);

  // Creates a snapshot of the current state of the code table. This is useful
  // to get a consistent view of the table (e.g. used by the serializer).
  std::vector<WasmCode*> SnapshotCodeTable() const;

  WasmCode* GetCode(uint32_t index) const;
  bool HasCode(uint32_t index) const;
  bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const;

  void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
  WasmModuleSourceMap* GetWasmSourceMap() const;

  Address jump_table_start() const {
    return main_jump_table_ ? main_jump_table_->instruction_start()
                            : kNullAddress;
  }

  uint32_t GetJumpTableOffset(uint32_t func_index) const;

  // Returns the canonical target to call for the given function (the slot in
  // the first jump table).
  Address GetCallTargetForFunction(uint32_t func_index) const;

  struct JumpTablesRef {
    const Address jump_table_start = kNullAddress;
    const Address far_jump_table_start = kNullAddress;

    bool is_valid() const { return far_jump_table_start != kNullAddress; }
  };

  // Finds the jump tables that should be used for a given code region. This
  // information is then passed to {GetNearCallTargetForFunction} and
  // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
  // up there. Returns an empty struct if no suitable jump tables exist.
  JumpTablesRef FindJumpTablesForRegion(base::AddressRegion) const;
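  // Typical usage (sketch): look the tables up once per code region, then
  // resolve many targets against the cached result, e.g.
  //   JumpTablesRef tables = native_module->FindJumpTablesForRegion(region);
  //   if (tables.is_valid()) {
  //     Address target =
  //         native_module->GetNearCallTargetForFunction(func_index, tables);
  //   }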

  // Similar to {GetCallTargetForFunction}, but uses the jump table previously
  // looked up via {FindJumpTablesForRegion}.
  Address GetNearCallTargetForFunction(uint32_t func_index,
                                       const JumpTablesRef&) const;

  // Get a runtime stub entry (which is a far jump table slot) in the jump table
  // previously looked up via {FindJumpTablesForRegion}.
  Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
                                  const JumpTablesRef&) const;

  // Reverse lookup from a given call target (which must be a jump table slot)
  // to a function index.
  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;

  bool SetExecutable(bool executable) {
    return code_allocator_.SetExecutable(executable);
  }

  // For cctests, where we build both WasmModule and the runtime objects
  // on the fly, and bypass the instance builder pipeline.
  void ReserveCodeTableForTesting(uint32_t max_functions);

  void LogWasmCodes(Isolate* isolate);

  CompilationState* compilation_state() { return compilation_state_.get(); }

  // Create a {CompilationEnv} object for compilation. The caller has to ensure
  // that the {WasmModule} pointer stays valid while the {CompilationEnv} is
  // being used.
  CompilationEnv CreateCompilationEnv() const;

  uint32_t num_functions() const {
    return module_->num_declared_functions + module_->num_imported_functions;
  }
  uint32_t num_imported_functions() const {
    return module_->num_imported_functions;
  }
  UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
  Vector<const uint8_t> wire_bytes() const {
    return std::atomic_load(&wire_bytes_)->as_vector();
  }
  const WasmModule* module() const { return module_.get(); }
  std::shared_ptr<const WasmModule> shared_module() const { return module_; }
  size_t committed_code_space() const {
    return code_allocator_.committed_code_space();
  }
  size_t generated_code_size() const {
    return code_allocator_.generated_code_size();
  }
  size_t liftoff_bailout_count() const { return liftoff_bailout_count_.load(); }
  size_t liftoff_code_size() const { return liftoff_code_size_.load(); }
  size_t turbofan_code_size() const { return turbofan_code_size_.load(); }
  WasmEngine* engine() const { return engine_; }

  bool HasWireBytes() const {
    auto wire_bytes = std::atomic_load(&wire_bytes_);
    return wire_bytes && !wire_bytes->empty();
  }
  void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);

  WasmCode* Lookup(Address) const;

  WasmImportWrapperCache* import_wrapper_cache() const {
    return import_wrapper_cache_.get();
  }

  const WasmFeatures& enabled_features() const { return enabled_features_; }

  // Returns the runtime stub id that corresponds to the given address (which
  // must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
  WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;

  // Sample the current code size of this module into the given counters.
  enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
  void SampleCodeSize(Counters*, CodeSamplingTime) const;

  V8_WARN_UNUSED_RESULT std::unique_ptr<WasmCode> AddCompiledCode(
      WasmCompilationResult);
  V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
      Vector<WasmCompilationResult>);

  // Set a new tiering state, but don't trigger any recompilation yet; use
  // {RecompileForTiering} for that. The two steps are split because in some
  // scenarios we need to drop locks before triggering recompilation.
  void SetTieringState(TieringState);

  // Check whether this module is tiered down for debugging.
  bool IsTieredDown();

  // Fully recompile this module in the tier set previously via
  // {SetTieringState}. The calling thread contributes to compilation and only
  // returns once recompilation is done.
  void RecompileForTiering();

  // Find all functions that need to be recompiled for a new tier. Note that
  // compilation jobs might run concurrently, so this method only considers the
  // compilation state of this native module at the time of the call.
  // Returns a vector of function indexes to recompile.
  std::vector<int> FindFunctionsToRecompile(TieringState);

  // Free a set of functions of this module. Uncommits whole pages if possible.
  // The given vector must be ordered by the instruction start address, and all
  // {WasmCode} objects must not be used any more.
  // Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
  // its accounting.
  void FreeCode(Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces for this module.
  size_t GetNumberOfCodeSpacesForTesting() const;

  // Check whether there is DebugInfo for this NativeModule.
  bool HasDebugInfo() const;

  // Get or create the debug info for this NativeModule.
  DebugInfo* GetDebugInfo();

  uint32_t* num_liftoff_function_calls_array() {
    return num_liftoff_function_calls_.get();
  }

 private:
  friend class WasmCode;
  friend class WasmCodeAllocator;
  friend class WasmCodeManager;
  friend class NativeModuleModificationScope;

  struct CodeSpaceData {
    base::AddressRegion region;
    WasmCode* jump_table;
    WasmCode* far_jump_table;
  };

  // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
  NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
               VirtualMemory code_space,
               std::shared_ptr<const WasmModule> module,
               std::shared_ptr<Counters> async_counters,
               std::shared_ptr<NativeModule>* shared_this);

  std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
      int index, const CodeDesc& desc, int stack_slots,
      int tagged_parameter_slots,
      Vector<const byte> protected_instructions_data,
      Vector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier, ForDebugging for_debugging,
      Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);

  WasmCode* CreateEmptyJumpTableInRegion(
      int jump_table_size, base::AddressRegion,
      const WasmCodeAllocator::OptionalLock&);

  void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);

  // Hold the {allocation_mutex_} when calling one of these methods.
  // {slot_index} is the index in the declared functions, i.e. function index
  // minus the number of imported functions.
  void PatchJumpTablesLocked(uint32_t slot_index, Address target);
  void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index,
                            Address target);

  // Called by the {WasmCodeAllocator} to register a new code space.
  void AddCodeSpace(base::AddressRegion,
                    const WasmCodeAllocator::OptionalLock&);

  // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
  WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);

  // {WasmCodeAllocator} manages all code reservations and allocations for this
  // {NativeModule}.
  WasmCodeAllocator code_allocator_;

  // Features enabled for this module. We keep a copy of the features that
  // were enabled at the time of the creation of this native module,
  // to be consistent across asynchronous compilations later.
  const WasmFeatures enabled_features_;

  // The decoded module, stored in a shared_ptr such that background compile
  // tasks can keep this alive.
  std::shared_ptr<const WasmModule> module_;

  std::unique_ptr<WasmModuleSourceMap> source_map_;

  // Wire bytes, held in a shared_ptr so they can be kept alive by the
  // {WireBytesStorage}, held by background compile tasks.
  std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;

  // The first allocated jump table. Always used by external calls (from JS).
  // Wasm calls might use one of the other jump tables stored in
  // {code_space_data_}.
  WasmCode* main_jump_table_ = nullptr;

  // The first allocated far jump table.
  WasmCode* main_far_jump_table_ = nullptr;

  // Lazy compile stub table, containing entries to jump to the
  // {WasmCompileLazy} builtin, passing the function index.
  WasmCode* lazy_compile_table_ = nullptr;

  // The compilation state keeps track of compilation tasks for this module.
  // Note that its destructor blocks until all tasks are finished/aborted and
  // hence needs to be destructed first when this native module dies.
  std::unique_ptr<CompilationState> compilation_state_;

  // A cache of the import wrappers, keyed on the kind and signature.
  std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;

  // Array to handle number of function calls.
  std::unique_ptr<uint32_t[]> num_liftoff_function_calls_;

  // This mutex protects concurrent calls to {AddCode} and friends.
  mutable base::Mutex allocation_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {allocation_mutex_}:

  // Holds all allocated code objects. For lookup based on pc, the key is the
  // instruction start address of the value.
  std::map<Address, std::unique_ptr<WasmCode>> owned_code_;

  // Table of the latest code object per function, updated on initial
  // compilation and tier up. The number of entries is
  // {WasmModule::num_declared_functions}, i.e. there are no entries for
  // imported functions.
  std::unique_ptr<WasmCode*[]> code_table_;

  // Data (especially jump table) per code space.
  std::vector<CodeSpaceData> code_space_data_;

  // Debug information for this module. You only need to hold the allocation
  // mutex while getting the {DebugInfo} pointer, or initializing this field.
  // Further accesses to the {DebugInfo} do not need to be protected by the
  // mutex.
  std::unique_ptr<DebugInfo> debug_info_;

  TieringState tiering_state_ = kTieredUp;

  // End of fields protected by {allocation_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  WasmEngine* const engine_;
  int modification_scope_depth_ = 0;
  UseTrapHandler use_trap_handler_ = kNoTrapHandler;
  bool lazy_compile_frozen_ = false;
  std::atomic<size_t> liftoff_bailout_count_{0};
  std::atomic<size_t> liftoff_code_size_{0};
  std::atomic<size_t> turbofan_code_size_{0};
};

class V8_EXPORT_PRIVATE WasmCodeManager final {
 public:
  explicit WasmCodeManager(size_t max_committed);
  WasmCodeManager(const WasmCodeManager&) = delete;
  WasmCodeManager& operator=(const WasmCodeManager&) = delete;

#ifdef DEBUG
  ~WasmCodeManager() {
    // No more committed code space.
    DCHECK_EQ(0, total_committed_code_space_.load());
  }
#endif

#if defined(V8_OS_WIN64)
  bool CanRegisterUnwindInfoForNonABICompliantCodeRange() const;
#endif  // V8_OS_WIN64

  NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  size_t committed_code_space() const {
    return total_committed_code_space_.load();
  }

  // Estimate the needed code space for a Liftoff function based on the size of
  // the function body (wasm byte code).
  static size_t EstimateLiftoffCodeSize(int body_size);
  // Estimate the needed code space from a completely decoded module.
  static size_t EstimateNativeModuleCodeSize(const WasmModule* module,
                                             bool include_liftoff);
  // Estimate the needed code space from the number of functions and total code
  // section length.
  static size_t EstimateNativeModuleCodeSize(int num_functions,
                                             int num_imported_functions,
                                             int code_section_length,
                                             bool include_liftoff);
  // Estimate the size of metadata needed for the NativeModule, excluding
  // generated code. This data is still stored on the C++ heap.
  static size_t EstimateNativeModuleMetaDataSize(const WasmModule* module);

 private:
  friend class WasmCodeAllocator;
  friend class WasmEngine;

  std::shared_ptr<NativeModule> NewNativeModule(
      WasmEngine* engine, Isolate* isolate,
      const WasmFeatures& enabled_features, size_t code_size_estimate,
      std::shared_ptr<const WasmModule> module);

  V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
                                                  void* hint = nullptr);
  void Commit(base::AddressRegion);
  void Decommit(base::AddressRegion);

  void FreeNativeModule(Vector<VirtualMemory> owned_code,
                        size_t committed_size);

  void AssignRange(base::AddressRegion, NativeModule*);

  const size_t max_committed_code_space_;

  std::atomic<size_t> total_committed_code_space_{0};
  // If the committed code space exceeds {critical_committed_code_space_}, then
  // we trigger a GC before creating the next module. This value is set to the
  // currently committed space plus 50% of the available code space on creation
  // and updated after each GC.
  std::atomic<size_t> critical_committed_code_space_;

  mutable base::Mutex native_modules_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {native_modules_mutex_}:

  std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;

  // End of fields protected by {native_modules_mutex_}.
  //////////////////////////////////////////////////////////////////////////////
};

// Within the scope, the native_module is writable and not executable.
// At the scope's destruction, the native_module is executable and not writable.
// The states inside the scope and at the scope's termination do not depend on
// the native_module's state when entering the scope.
// We currently mark the entire module's memory W^X:
//  - for AOT, that's as efficient as it can be.
//  - for Lazy, we don't have a heuristic for functions that may need patching,
//    and even if we did, the resulting set of pages may be fragmented.
//    Currently, we try to keep the number of syscalls low.
//  - similar argument for debug time.
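// Usage sketch: wrap any direct writes to the module's code space, e.g.
//   {
//     NativeModuleModificationScope scope(native_module);
//     // ... patch code ...
//   }
// The code space becomes executable again when the scope is destructed.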
class NativeModuleModificationScope final {
 public:
  explicit NativeModuleModificationScope(NativeModule* native_module);
  ~NativeModuleModificationScope();

 private:
  NativeModule* native_module_;
};

// {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
// by e.g. creating new code or looking up code by its address are added to the
// top-most {WasmCodeRefScope}.
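// Usage sketch: open a scope before obtaining {WasmCode} pointers, e.g.
//   WasmCodeRefScope code_ref_scope;
//   WasmCode* code = native_module->Lookup(pc);
// The scope holds a reference to every such pointer until it is destructed,
// keeping the code alive while it is being used.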
class V8_EXPORT_PRIVATE WasmCodeRefScope {
 public:
  WasmCodeRefScope();
  WasmCodeRefScope(const WasmCodeRefScope&) = delete;
  WasmCodeRefScope& operator=(const WasmCodeRefScope&) = delete;
  ~WasmCodeRefScope();

  // Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
  // there is no current scope.
  static void AddRef(WasmCode*);

 private:
  WasmCodeRefScope* const previous_scope_;
  std::unordered_set<WasmCode*> code_ptrs_;
};

// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
// ref-counted pointer to a {WasmCode} object.
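// Usage sketch: keep a specific {WasmCode} (and its {NativeModule}) alive
// independently of any {WasmCodeRefScope}, e.g. (with {native_module} being a
// std::shared_ptr<NativeModule>):
//   auto ref = std::make_unique<GlobalWasmCodeRef>(code, native_module);
// The reference is dropped again when the {GlobalWasmCodeRef} is destructed.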
class GlobalWasmCodeRef {
 public:
  explicit GlobalWasmCodeRef(WasmCode* code,
                             std::shared_ptr<NativeModule> native_module)
      : code_(code), native_module_(std::move(native_module)) {
    code_->IncRef();
  }

  GlobalWasmCodeRef(const GlobalWasmCodeRef&) = delete;
  GlobalWasmCodeRef& operator=(const GlobalWasmCodeRef&) = delete;

  ~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }

  // Get a pointer to the contained {WasmCode} object. This is only guaranteed
  // to exist as long as this {GlobalWasmCodeRef} exists.
  WasmCode* code() const { return code_; }

 private:
  WasmCode* const code_;
  // Also keep the {NativeModule} alive.
  const std::shared_ptr<NativeModule> native_module_;
};

const char* GetRuntimeStubName(WasmCode::RuntimeStubId);

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_WASM_CODE_MANAGER_H_