1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/wasm/wasm-code-manager.h"
6 
7 #include <iomanip>
8 
9 #include "src/base/build_config.h"
10 #include "src/base/iterator.h"
11 #include "src/base/macros.h"
12 #include "src/base/platform/platform.h"
13 #include "src/base/small-vector.h"
14 #include "src/codegen/assembler-inl.h"
15 #include "src/codegen/macro-assembler-inl.h"
16 #include "src/codegen/macro-assembler.h"
17 #include "src/common/globals.h"
18 #include "src/diagnostics/disassembler.h"
19 #include "src/logging/counters.h"
20 #include "src/logging/log.h"
21 #include "src/objects/objects-inl.h"
22 #include "src/snapshot/embedded/embedded-data.h"
23 #include "src/utils/ostreams.h"
24 #include "src/utils/vector.h"
25 #include "src/wasm/code-space-access.h"
26 #include "src/wasm/compilation-environment.h"
27 #include "src/wasm/function-compiler.h"
28 #include "src/wasm/jump-table-assembler.h"
29 #include "src/wasm/module-compiler.h"
30 #include "src/wasm/wasm-debug.h"
31 #include "src/wasm/wasm-import-wrapper-cache.h"
32 #include "src/wasm/wasm-module-sourcemap.h"
33 #include "src/wasm/wasm-module.h"
34 #include "src/wasm/wasm-objects-inl.h"
35 #include "src/wasm/wasm-objects.h"
36 
37 #if defined(V8_OS_WIN64)
38 #include "src/diagnostics/unwinding-info-win64.h"
39 #endif  // V8_OS_WIN64
40 
41 #define TRACE_HEAP(...)                                   \
42   do {                                                    \
43     if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
44   } while (false)
45 
46 namespace v8 {
47 namespace internal {
48 namespace wasm {
49 
50 using trap_handler::ProtectedInstructionData;
51 
52 #if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
53 thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
54 #endif
55 
56 base::AddressRegion DisjointAllocationPool::Merge(
57     base::AddressRegion new_region) {
58   // Find the possible insertion position by identifying the first region whose
59   // start address is not less than that of {new_region}. Since there cannot be
60   // any overlap between regions, this also means that the start of {above} is
61   // greater than or equal to the *end* of {new_region}.
62   auto above = regions_.lower_bound(new_region);
63   DCHECK(above == regions_.end() || above->begin() >= new_region.end());
64 
65   // Check whether to merge with {above}.
66   if (above != regions_.end() && new_region.end() == above->begin()) {
67     base::AddressRegion merged_region{new_region.begin(),
68                                       new_region.size() + above->size()};
69     DCHECK_EQ(merged_region.end(), above->end());
70     // Check whether to also merge with the region below.
71     if (above != regions_.begin()) {
72       auto below = above;
73       --below;
74       if (below->end() == new_region.begin()) {
75         merged_region = {below->begin(), below->size() + merged_region.size()};
76         regions_.erase(below);
77       }
78     }
79     auto insert_pos = regions_.erase(above);
80     regions_.insert(insert_pos, merged_region);
81     return merged_region;
82   }
83 
84   // No element below, and not adjacent to {above}: insert and done.
85   if (above == regions_.begin()) {
86     regions_.insert(above, new_region);
87     return new_region;
88   }
89 
90   auto below = above;
91   --below;
92   // Consistency check:
93   DCHECK(above == regions_.end() || below->end() < above->begin());
94 
95   // Adjacent to {below}: merge and done.
96   if (below->end() == new_region.begin()) {
97     base::AddressRegion merged_region{below->begin(),
98                                       below->size() + new_region.size()};
99     DCHECK_EQ(merged_region.end(), new_region.end());
100     regions_.erase(below);
101     regions_.insert(above, merged_region);
102     return merged_region;
103   }
104 
105   // Not adjacent to any existing region: insert between {below} and {above}.
106   DCHECK_LT(below->end(), new_region.begin());
107   regions_.insert(above, new_region);
108   return new_region;
109 }
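// Illustrative sketch (not part of the original source): how {Merge} coalesces
// adjacent free regions. Assuming a pool constructed with the single region
// [0x1000, 0x2000) and a second region [0x3000, 0x4000) merged in later,
// merging the gap [0x2000, 0x3000) joins all three into one region:
//
//   DisjointAllocationPool pool({0x1000, 0x1000});  // holds [0x1000, 0x2000)
//   pool.Merge({0x3000, 0x1000});                   // adds  [0x3000, 0x4000)
//   base::AddressRegion merged = pool.Merge({0x2000, 0x1000});
//   // merged == [0x1000, 0x4000); the pool now holds that single region.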
110 
111 base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
112   return AllocateInRegion(size,
113                           {kNullAddress, std::numeric_limits<size_t>::max()});
114 }
115 
116 base::AddressRegion DisjointAllocationPool::AllocateInRegion(
117     size_t size, base::AddressRegion region) {
118   // Get an iterator to the first contained region whose start address is not
119   // smaller than the start address of {region}. Start the search from the
120   // region one before that (the last one whose start address is smaller).
121   auto it = regions_.lower_bound(region);
122   if (it != regions_.begin()) --it;
123 
124   for (auto end = regions_.end(); it != end; ++it) {
125     base::AddressRegion overlap = it->GetOverlap(region);
126     if (size > overlap.size()) continue;
127     base::AddressRegion ret{overlap.begin(), size};
128     base::AddressRegion old = *it;
129     auto insert_pos = regions_.erase(it);
130     if (size == old.size()) {
131       // We use the full region --> nothing to add back.
132     } else if (ret.begin() == old.begin()) {
133       // We return a region at the start --> shrink old region from front.
134       regions_.insert(insert_pos, {old.begin() + size, old.size() - size});
135     } else if (ret.end() == old.end()) {
136       // We return a region at the end --> shrink remaining region.
137       regions_.insert(insert_pos, {old.begin(), old.size() - size});
138     } else {
139       // We return something in the middle --> split the remaining region
140       // (insert the region with smaller address first).
141       regions_.insert(insert_pos, {old.begin(), ret.begin() - old.begin()});
142       regions_.insert(insert_pos, {ret.end(), old.end() - ret.end()});
143     }
144     return ret;
145   }
146   return {};
147 }
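// Illustrative sketch (not part of the original source): continuing the
// example above, allocating from a pool whose only free region is
// [0x1000, 0x4000) carves the requested size off the front:
//
//   base::AddressRegion ret = pool.Allocate(0x800);
//   // ret == [0x1000, 0x1800); the free region shrinks to [0x1800, 0x4000).
//   // A request that does not fit into any free region (or into its overlap
//   // with the {region} passed to AllocateInRegion) returns an empty region.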
148 
149 Address WasmCode::constant_pool() const {
150   if (FLAG_enable_embedded_constant_pool) {
151     if (constant_pool_offset_ < code_comments_offset_) {
152       return instruction_start() + constant_pool_offset_;
153     }
154   }
155   return kNullAddress;
156 }
157 
158 Address WasmCode::handler_table() const {
159   return instruction_start() + handler_table_offset_;
160 }
161 
162 int WasmCode::handler_table_size() const {
163   DCHECK_GE(constant_pool_offset_, handler_table_offset_);
164   return static_cast<int>(constant_pool_offset_ - handler_table_offset_);
165 }
166 
167 Address WasmCode::code_comments() const {
168   return instruction_start() + code_comments_offset_;
169 }
170 
171 int WasmCode::code_comments_size() const {
172   DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
173   return static_cast<int>(unpadded_binary_size_ - code_comments_offset_);
174 }
175 
176 std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
177     std::initializer_list<Vector<const byte>> vectors) {
178   size_t total_size = 0;
179   for (auto& vec : vectors) total_size += vec.size();
180   // Use default-initialization (== no initialization).
181   std::unique_ptr<byte[]> result{new byte[total_size]};
182   byte* ptr = result.get();
183   for (auto& vec : vectors) {
184     if (vec.empty()) continue;  // Avoid nullptr in {memcpy}.
185     memcpy(ptr, vec.begin(), vec.size());
186     ptr += vec.size();
187   }
188   return result;
189 }
190 
191 void WasmCode::RegisterTrapHandlerData() {
192   DCHECK(!has_trap_handler_index());
193   if (kind() != WasmCode::kFunction) return;
194   if (protected_instructions_size_ == 0) return;
195 
196   Address base = instruction_start();
197 
198   size_t size = instructions().size();
199   auto protected_instruction_data = this->protected_instructions();
200   const int index =
201       RegisterHandlerData(base, size, protected_instruction_data.size(),
202                           protected_instruction_data.begin());
203 
204   // TODO(eholk): if index is negative, fail.
205   CHECK_LE(0, index);
206   set_trap_handler_index(index);
207   DCHECK(has_trap_handler_index());
208 }
209 
210 bool WasmCode::ShouldBeLogged(Isolate* isolate) {
211   // The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
212   // to call {WasmEngine::EnableCodeLogging} if this return value would change
213   // for any isolate. Otherwise we might lose code events.
214   return isolate->logger()->is_listening_to_code_events() ||
215          isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
216          isolate->is_profiling();
217 }
218 
219 void WasmCode::LogCode(Isolate* isolate) const {
220   DCHECK(ShouldBeLogged(isolate));
221   if (IsAnonymous()) return;
222 
223   ModuleWireBytes wire_bytes(native_module()->wire_bytes());
224   WireBytesRef name_ref =
225       native_module()->module()->lazily_generated_names.LookupFunctionName(
226           wire_bytes, index(),
227           VectorOf(native_module()->module()->export_table));
228   WasmName name = wire_bytes.GetNameOrNull(name_ref);
229 
230   const WasmDebugSymbols& debug_symbols =
231       native_module()->module()->debug_symbols;
232   auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
233   auto source_map = native_module()->GetWasmSourceMap();
234   if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
235       !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
236     WasmName external_url =
237         wire_bytes.GetNameOrNull(debug_symbols.external_url);
238     std::string external_url_string(external_url.data(), external_url.size());
239     HandleScope scope(isolate);
240     v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
241     Local<v8::String> source_map_str =
242         load_wasm_source_map(v8_isolate, external_url_string.c_str());
243     native_module()->SetWasmSourceMap(
244         std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
245   }
246 
247   std::string name_buffer;
248   if (kind() == kWasmToJsWrapper) {
249     name_buffer = "wasm-to-js:";
250     size_t prefix_len = name_buffer.size();
251     constexpr size_t kMaxSigLength = 128;
252     name_buffer.resize(prefix_len + kMaxSigLength);
253     const FunctionSig* sig = native_module()->module()->functions[index_].sig;
254     size_t sig_length =
255         PrintSignature(VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
256     name_buffer.resize(prefix_len + sig_length);
257     // If the import has a name, also append that (separated by "-").
258     if (!name.empty()) {
259       name_buffer += '-';
260       name_buffer.append(name.begin(), name.size());
261     }
262     name = VectorOf(name_buffer);
263   } else if (name.empty()) {
264     name_buffer.resize(32);
265     name_buffer.resize(
266         SNPrintF(VectorOf(&name_buffer.front(), name_buffer.size()),
267                  "wasm-function[%d]", index()));
268     name = VectorOf(name_buffer);
269   }
270   PROFILE(isolate,
271           CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name));
272 
273   if (!source_positions().empty()) {
274     LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
275                                                        source_positions()));
276   }
277 }
278 
279 void WasmCode::Validate() const {
280 #ifdef DEBUG
281   // Scope for foreign WasmCode pointers.
282   WasmCodeRefScope code_ref_scope;
283   // We expect certain relocation info modes to never appear in {WasmCode}
284   // objects or to be restricted to a small set of valid values. Hence the
285   // iteration below does not use a mask, but visits all relocation data.
286   for (RelocIterator it(instructions(), reloc_info(), constant_pool());
287        !it.done(); it.next()) {
288     RelocInfo::Mode mode = it.rinfo()->rmode();
289     switch (mode) {
290       case RelocInfo::WASM_CALL: {
291         Address target = it.rinfo()->wasm_call_address();
292         WasmCode* code = native_module_->Lookup(target);
293         CHECK_NOT_NULL(code);
294         CHECK_EQ(WasmCode::kJumpTable, code->kind());
295         CHECK(code->contains(target));
296         break;
297       }
298       case RelocInfo::WASM_STUB_CALL: {
299         Address target = it.rinfo()->wasm_stub_call_address();
300         WasmCode* code = native_module_->Lookup(target);
301         CHECK_NOT_NULL(code);
302         CHECK_EQ(WasmCode::kJumpTable, code->kind());
303         CHECK(code->contains(target));
304         break;
305       }
306       case RelocInfo::INTERNAL_REFERENCE:
307       case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
308         Address target = it.rinfo()->target_internal_reference();
309         CHECK(contains(target));
310         break;
311       }
312       case RelocInfo::EXTERNAL_REFERENCE:
313       case RelocInfo::CONST_POOL:
314       case RelocInfo::VENEER_POOL:
315         // These are OK to appear.
316         break;
317       default:
318         FATAL("Unexpected mode: %d", mode);
319     }
320   }
321 #endif
322 }
323 
324 void WasmCode::MaybePrint(const char* name) const {
325   // Determines whether flags want this code to be printed.
326   bool function_index_matches =
327       (!IsAnonymous() &&
328        FLAG_print_wasm_code_function_index == static_cast<int>(index()));
329   if (FLAG_print_code ||
330       (kind() == kFunction ? (FLAG_print_wasm_code || function_index_matches)
331                            : FLAG_print_wasm_stub_code)) {
332     Print(name);
333   }
334 }
335 
336 void WasmCode::Print(const char* name) const {
337   StdoutStream os;
338   os << "--- WebAssembly code ---\n";
339   Disassemble(name, os);
340   if (native_module_->HasDebugInfo()) {
341     if (auto* debug_side_table =
342             native_module_->GetDebugInfo()->GetDebugSideTableIfExists(this)) {
343       debug_side_table->Print(os);
344     }
345   }
346   os << "--- End code ---\n";
347 }
348 
349 void WasmCode::Disassemble(const char* name, std::ostream& os,
350                            Address current_pc) const {
351   if (name) os << "name: " << name << "\n";
352   if (!IsAnonymous()) os << "index: " << index() << "\n";
353   os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
354   os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
355   size_t padding = instructions().size() - unpadded_binary_size_;
356   os << "Body (size = " << instructions().size() << " = "
357      << unpadded_binary_size_ << " + " << padding << " padding)\n";
358 
359 #ifdef ENABLE_DISASSEMBLER
360   int instruction_size = unpadded_binary_size_;
361   if (constant_pool_offset_ < instruction_size) {
362     instruction_size = constant_pool_offset_;
363   }
364   if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
365     instruction_size = safepoint_table_offset_;
366   }
367   if (handler_table_offset_ < instruction_size) {
368     instruction_size = handler_table_offset_;
369   }
370   DCHECK_LT(0, instruction_size);
371   os << "Instructions (size = " << instruction_size << ")\n";
372   Disassembler::Decode(nullptr, &os, instructions().begin(),
373                        instructions().begin() + instruction_size,
374                        CodeReference(this), current_pc);
375   os << "\n";
376 
377   if (handler_table_size() > 0) {
378     HandlerTable table(this);
379     os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
380        << "):\n";
381     table.HandlerTableReturnPrint(os);
382     os << "\n";
383   }
384 
385   if (protected_instructions_size_ > 0) {
386     os << "Protected instructions:\n pc offset  land pad\n";
387     for (auto& data : protected_instructions()) {
388       os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
389          << std::hex << data.landing_offset << "\n";
390     }
391     os << "\n";
392   }
393 
394   if (!source_positions().empty()) {
395     os << "Source positions:\n pc offset  position\n";
396     for (SourcePositionTableIterator it(source_positions()); !it.done();
397          it.Advance()) {
398       os << std::setw(10) << std::hex << it.code_offset() << std::dec
399          << std::setw(10) << it.source_position().ScriptOffset()
400          << (it.is_statement() ? "  statement" : "") << "\n";
401     }
402     os << "\n";
403   }
404 
405   if (safepoint_table_offset_ > 0) {
406     SafepointTable table(this);
407     os << "Safepoints (size = " << table.size() << ")\n";
408     for (uint32_t i = 0; i < table.length(); i++) {
409       uintptr_t pc_offset = table.GetPcOffset(i);
410       os << reinterpret_cast<const void*>(instruction_start() + pc_offset);
411       os << std::setw(6) << std::hex << pc_offset << "  " << std::dec;
412       table.PrintEntry(i, os);
413       os << " (sp -> fp)";
414       SafepointEntry entry = table.GetEntry(i);
415       if (entry.trampoline_pc() != -1) {
416         os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
417       }
418       if (entry.has_deoptimization_index()) {
419         os << " deopt: " << std::setw(6) << entry.deoptimization_index();
420       }
421       os << "\n";
422     }
423     os << "\n";
424   }
425 
426   os << "RelocInfo (size = " << reloc_info().size() << ")\n";
427   for (RelocIterator it(instructions(), reloc_info(), constant_pool());
428        !it.done(); it.next()) {
429     it.rinfo()->Print(nullptr, os);
430   }
431   os << "\n";
432 
433   if (code_comments_size() > 0) {
434     PrintCodeCommentsSection(os, code_comments(), code_comments_size());
435   }
436 #endif  // ENABLE_DISASSEMBLER
437 }
438 
439 const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
440   switch (kind) {
441     case WasmCode::kFunction:
442       return "wasm function";
443     case WasmCode::kWasmToCapiWrapper:
444       return "wasm-to-capi";
445     case WasmCode::kWasmToJsWrapper:
446       return "wasm-to-js";
447     case WasmCode::kJumpTable:
448       return "jump table";
449   }
450   return "unknown kind";
451 }
452 
453 WasmCode::~WasmCode() {
454   if (has_trap_handler_index()) {
455     trap_handler::ReleaseHandlerData(trap_handler_index());
456   }
457 }
458 
459 V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
460   if (native_module_->engine()->AddPotentiallyDeadCode(this)) {
461     // The code just became potentially dead. The ref count we wanted to
462     // decrement is now transferred to the set of potentially dead code, and
463     // will be decremented when the next GC is run.
464     return false;
465   }
466   // If we reach here, the code was already potentially dead. Decrement the ref
467   // count, and return true if it drops to zero.
468   return DecRefOnDeadCode();
469 }
470 
471 // static
472 void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
473   // Decrement the ref counter of all given code objects. Keep the ones whose
474   // ref count drops to zero.
475   WasmEngine::DeadCodeMap dead_code;
476   WasmEngine* engine = nullptr;
477   for (WasmCode* code : code_vec) {
478     if (!code->DecRef()) continue;  // Remaining references.
479     dead_code[code->native_module()].push_back(code);
480     if (!engine) engine = code->native_module()->engine();
481     DCHECK_EQ(engine, code->native_module()->engine());
482   }
483 
484   DCHECK_EQ(dead_code.empty(), engine == nullptr);
485   if (engine) engine->FreeDeadCode(dead_code);
486 }
487 
488 int WasmCode::GetSourcePositionBefore(int offset) {
489   int position = kNoSourcePosition;
490   for (SourcePositionTableIterator iterator(source_positions());
491        !iterator.done() && iterator.code_offset() < offset;
492        iterator.Advance()) {
493     position = iterator.source_position().ScriptOffset();
494   }
495   return position;
496 }
497 
498 WasmCodeAllocator::OptionalLock::~OptionalLock() {
499   if (allocator_) allocator_->mutex_.Unlock();
500 }
501 
502 void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
503   DCHECK(!is_locked());
504   allocator_ = allocator;
505   allocator->mutex_.Lock();
506 }
507 
508 // static
509 constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
510 
511 WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
512                                      VirtualMemory code_space,
513                                      std::shared_ptr<Counters> async_counters)
514     : code_manager_(code_manager),
515       free_code_space_(code_space.region()),
516       async_counters_(std::move(async_counters)) {
517   owned_code_space_.reserve(4);
518   owned_code_space_.emplace_back(std::move(code_space));
519   async_counters_->wasm_module_num_code_spaces()->AddSample(1);
520 }
521 
522 WasmCodeAllocator::~WasmCodeAllocator() {
523   code_manager_->FreeNativeModule(VectorOf(owned_code_space_),
524                                   committed_code_space());
525 }
526 
527 void WasmCodeAllocator::Init(NativeModule* native_module) {
528   DCHECK_EQ(1, owned_code_space_.size());
529   native_module->AddCodeSpace(owned_code_space_[0].region(), {});
530 }
531 
532 namespace {
533 // On Windows, we cannot commit a region that straddles different reservations
534 // of virtual memory. Because we bump-allocate, and because, if we need more
535 // memory, we append that memory at the end of the owned_code_space_ list, we
536 // traverse that list in reverse order to find the reservation(s) that guide how
537 // to chunk the region to commit.
538 #if V8_OS_WIN
539 constexpr bool kNeedsToSplitRangeByReservations = true;
540 #else
541 constexpr bool kNeedsToSplitRangeByReservations = false;
542 #endif
543 
544 base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
545     base::AddressRegion range,
546     const std::vector<VirtualMemory>& owned_code_space) {
547   if (!kNeedsToSplitRangeByReservations) return {range};
548 
549   base::SmallVector<base::AddressRegion, 1> split_ranges;
550   size_t missing_begin = range.begin();
551   size_t missing_end = range.end();
552   for (auto& vmem : base::Reversed(owned_code_space)) {
553     Address overlap_begin = std::max(missing_begin, vmem.address());
554     Address overlap_end = std::min(missing_end, vmem.end());
555     if (overlap_begin >= overlap_end) continue;
556     split_ranges.emplace_back(overlap_begin, overlap_end - overlap_begin);
557     // Opportunistically reduce the missing range. This might terminate the loop
558     // early.
559     if (missing_begin == overlap_begin) missing_begin = overlap_end;
560     if (missing_end == overlap_end) missing_end = overlap_begin;
561     if (missing_begin >= missing_end) break;
562   }
563 #ifdef ENABLE_SLOW_DCHECKS
564   // The returned vector should cover the full range.
565   size_t total_split_size = 0;
566   for (auto split : split_ranges) total_split_size += split.size();
567   DCHECK_EQ(range.size(), total_split_size);
568 #endif
569   return split_ranges;
570 }
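// Illustrative sketch (not part of the original source): with two owned
// reservations [0x10000, 0x20000) and [0x20000, 0x30000), allocated in that
// order, committing the range [0x1F000, 0x21000) on Windows yields two split
// ranges, one per reservation: [0x20000, 0x21000) (from the newer reservation,
// which is visited first) and [0x1F000, 0x20000). On other platforms the range
// is returned unchanged as a single element.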
571 
572 int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
573   return NativeModule::kNeedsFarJumpsBetweenCodeSpaces
574              ? static_cast<int>(num_declared_functions)
575              : 0;
576 }
577 
578 // Returns an overapproximation of the code size overhead per new code space
579 // created by the jump tables.
580 size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
581   // Overhead for the jump table.
582   size_t overhead = RoundUp<kCodeAlignment>(
583       JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));
584 
585 #if defined(V8_OS_WIN64)
586   // On Win64, we need to reserve some pages at the beginning of an executable
587   // space. See {AddCodeSpace}.
588   overhead += Heap::GetCodeRangeReservedAreaSize();
589 #endif  // V8_OS_WIN64
590 
591   // Overhead for the far jump table.
592   overhead +=
593       RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
594           WasmCode::kRuntimeStubCount,
595           NumWasmFunctionsInFarJumpTable(num_declared_functions)));
596 
597   return overhead;
598 }
599 
600 size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
601                        size_t total_reserved) {
602   size_t overhead = OverheadPerCodeSpace(num_declared_functions);
603 
604   // Reserve a power of two at least as big as any of
605   //   a) needed size + overhead (this is the minimum needed)
606   //   b) 2 * overhead (to not waste too much space by overhead)
607   //   c) 1/4 of current total reservation size (to grow exponentially)
608   size_t reserve_size = base::bits::RoundUpToPowerOfTwo(
609       std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
610                         2 * overhead),
611                total_reserved / 4));
612 
613   // Limit by the maximum supported code space size.
614   return std::min(WasmCodeAllocator::kMaxCodeSpaceSize, reserve_size);
615 }
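// Worked example (not part of the original source), assuming a jump-table
// overhead of 64 KB: for code_size_estimate = 1 MB and total_reserved = 8 MB,
// the three candidates are roughly (a) 1 MB + 64 KB, (b) 128 KB, and
// (c) 2 MB. Their maximum, rounded up to a power of two, is 2 MB, which is
// well below kMaxCodeSpaceSize, so 2 MB is reserved for the new code space.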
616 
617 }  // namespace
618 
619 Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
620                                                 size_t size) {
621   return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion,
622                                  WasmCodeAllocator::OptionalLock{});
623 }
624 
625 Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
626     NativeModule* native_module, size_t size, base::AddressRegion region,
627     const WasmCodeAllocator::OptionalLock& optional_lock) {
628   OptionalLock new_lock;
629   if (!optional_lock.is_locked()) new_lock.Lock(this);
630   const auto& locked_lock =
631       optional_lock.is_locked() ? optional_lock : new_lock;
632   DCHECK(locked_lock.is_locked());
633   DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
634   DCHECK_LT(0, size);
635   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
636   size = RoundUp<kCodeAlignment>(size);
637   base::AddressRegion code_space =
638       free_code_space_.AllocateInRegion(size, region);
639   if (V8_UNLIKELY(code_space.is_empty())) {
640     // Only allocations without a specific region are allowed to fail. Otherwise
641     // the region must have been allocated big enough to hold all initial
642     // allocations (jump tables etc).
643     CHECK_EQ(kUnrestrictedRegion, region);
644 
645     Address hint = owned_code_space_.empty() ? kNullAddress
646                                              : owned_code_space_.back().end();
647 
648     size_t total_reserved = 0;
649     for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
650     size_t reserve_size = ReservationSize(
651         size, native_module->module()->num_declared_functions, total_reserved);
652     VirtualMemory new_mem =
653         code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
654     if (!new_mem.IsReserved()) {
655       V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
656       UNREACHABLE();
657     }
658 
659     base::AddressRegion new_region = new_mem.region();
660     code_manager_->AssignRange(new_region, native_module);
661     free_code_space_.Merge(new_region);
662     owned_code_space_.emplace_back(std::move(new_mem));
663     native_module->AddCodeSpace(new_region, locked_lock);
664 
665     code_space = free_code_space_.Allocate(size);
666     DCHECK(!code_space.is_empty());
667     async_counters_->wasm_module_num_code_spaces()->AddSample(
668         static_cast<int>(owned_code_space_.size()));
669   }
670   const Address commit_page_size = page_allocator->CommitPageSize();
671   Address commit_start = RoundUp(code_space.begin(), commit_page_size);
672   Address commit_end = RoundUp(code_space.end(), commit_page_size);
673   // {commit_start} will be either code_space.start or the start of the next
674   // page. {commit_end} will be the start of the page after the one in which
675   // the allocation ends.
676   // We start from an aligned start, and we know we allocated vmem in
677   // page multiples.
678   // We just need to commit what's not committed. The page in which we
679   // start is already committed (or we start at the beginning of a page).
680   // The end needs to be committed all through the end of the page.
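  // Numeric illustration (not part of the original source): with a 4 KB commit
  // page size and code_space == [0x2F80, 0x5300), commit_start rounds up to
  // 0x3000 and commit_end rounds up to 0x6000, so only the full pages
  // [0x3000, 0x6000) are committed here; the partial page containing 0x2F80
  // was already committed by the allocation that ends there.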
681   if (commit_start < commit_end) {
682     for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
683              {commit_start, commit_end - commit_start}, owned_code_space_)) {
684       code_manager_->Commit(split_range);
685     }
686     committed_code_space_.fetch_add(commit_end - commit_start);
687     // Committed code cannot grow bigger than maximum code space size.
688     DCHECK_LE(committed_code_space_.load(), FLAG_wasm_max_code_space * MB);
689   }
690   DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
691   allocated_code_space_.Merge(code_space);
692   generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
693 
694   TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
695              code_space.begin(), size);
696   return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
697 }
698 
699 bool WasmCodeAllocator::SetExecutable(bool executable) {
700   base::MutexGuard lock(&mutex_);
701   if (is_executable_ == executable) return true;
702   TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
703 
704   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
705 
706   if (FLAG_wasm_write_protect_code_memory) {
707     PageAllocator::Permission permission =
708         executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
709 #if V8_OS_WIN
710     // On Windows, we need to switch permissions per separate virtual memory
711     // reservation.
712     // For now, in that case, we commit at reserved memory granularity.
713     // Technically, that may be a waste, because we may reserve more than we
714     // use. On 32-bit though, the scarce resource is the address space -
715     // committed or not.
716     for (auto& vmem : owned_code_space_) {
717       if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
718                           permission)) {
719         return false;
720       }
721       TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
722                  executable);
723     }
724 #else   // V8_OS_WIN
725     size_t commit_page_size = page_allocator->CommitPageSize();
726     for (auto& region : allocated_code_space_.regions()) {
727       // allocated_code_space_ is fine-grained, so we need to
728       // page-align it.
729       size_t region_size = RoundUp(region.size(), commit_page_size);
730       if (!SetPermissions(page_allocator, region.begin(), region_size,
731                           permission)) {
732         return false;
733       }
734       TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to executable:%d\n",
735                  region.begin(), region.end(), executable);
736     }
737 #endif  // V8_OS_WIN
738   }
739   is_executable_ = executable;
740   return true;
741 }
742 
743 void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
744   // Zap code area and collect freed code regions.
745   DisjointAllocationPool freed_regions;
746   size_t code_size = 0;
747   CODE_SPACE_WRITE_SCOPE
748   for (WasmCode* code : codes) {
749     ZapCode(code->instruction_start(), code->instructions().size());
750     FlushInstructionCache(code->instruction_start(),
751                           code->instructions().size());
752     code_size += code->instructions().size();
753     freed_regions.Merge(base::AddressRegion{code->instruction_start(),
754                                             code->instructions().size()});
755   }
756   freed_code_size_.fetch_add(code_size);
757 
758   // Merge {freed_regions} into {freed_code_space_} and put all ranges of full
759   // pages to decommit into {regions_to_decommit} (decommitting is expensive,
760   // so try to merge regions before decommitting).
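  // Numeric illustration (not part of the original source): with a 4 KB page
  // size, freeing region [0x3200, 0x5400) when the merged free region is
  // [0x2800, 0x5400) gives discard_start = 0x3000 and discard_end = 0x5000, so
  // only the full pages [0x3000, 0x5000) are decommitted; the partial pages at
  // either end stay committed because they may still contain live code.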
761   DisjointAllocationPool regions_to_decommit;
762   PageAllocator* allocator = GetPlatformPageAllocator();
763   size_t commit_page_size = allocator->CommitPageSize();
764   {
765     base::MutexGuard guard(&mutex_);
766     for (auto region : freed_regions.regions()) {
767       auto merged_region = freed_code_space_.Merge(region);
768       Address discard_start =
769           std::max(RoundUp(merged_region.begin(), commit_page_size),
770                    RoundDown(region.begin(), commit_page_size));
771       Address discard_end =
772           std::min(RoundDown(merged_region.end(), commit_page_size),
773                    RoundUp(region.end(), commit_page_size));
774       if (discard_start >= discard_end) continue;
775       regions_to_decommit.Merge({discard_start, discard_end - discard_start});
776     }
777   }
778 
779   for (auto region : regions_to_decommit.regions()) {
780     size_t old_committed = committed_code_space_.fetch_sub(region.size());
781     DCHECK_GE(old_committed, region.size());
782     USE(old_committed);
783     for (base::AddressRegion split_range :
784          SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
785       code_manager_->Decommit(split_range);
786     }
787   }
788 }
789 
790 size_t WasmCodeAllocator::GetNumCodeSpaces() const {
791   base::MutexGuard lock(&mutex_);
792   return owned_code_space_.size();
793 }
794 
795 // static
796 constexpr base::AddressRegion WasmCodeAllocator::kUnrestrictedRegion;
797 
798 NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
799                            VirtualMemory code_space,
800                            std::shared_ptr<const WasmModule> module,
801                            std::shared_ptr<Counters> async_counters,
802                            std::shared_ptr<NativeModule>* shared_this)
803     : code_allocator_(engine->code_manager(), std::move(code_space),
804                       async_counters),
805       enabled_features_(enabled),
806       module_(std::move(module)),
807       import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
808           new WasmImportWrapperCache())),
809       engine_(engine),
810       use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
811                                                              : kNoTrapHandler) {
812   // We receive a pointer to an empty {std::shared_ptr}, and install ourselves
813   // there.
814   DCHECK_NOT_NULL(shared_this);
815   DCHECK_NULL(*shared_this);
816   shared_this->reset(this);
817   compilation_state_ =
818       CompilationState::New(*shared_this, std::move(async_counters));
819   DCHECK_NOT_NULL(module_);
820   if (module_->num_declared_functions > 0) {
821     code_table_ =
822         std::make_unique<WasmCode*[]>(module_->num_declared_functions);
823     num_liftoff_function_calls_ =
824         std::make_unique<uint32_t[]>(module_->num_declared_functions);
825 
826     // Start counter at 4 to avoid runtime calls for smaller numbers.
827     constexpr int kCounterStart = 4;
828     std::fill_n(num_liftoff_function_calls_.get(),
829                 module_->num_declared_functions, kCounterStart);
830   }
831   code_allocator_.Init(this);
832 }
833 
834 void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
835   WasmCodeRefScope code_ref_scope;
836   DCHECK_LE(module_->num_declared_functions, max_functions);
837   auto new_table = std::make_unique<WasmCode*[]>(max_functions);
838   if (module_->num_declared_functions > 0) {
839     memcpy(new_table.get(), code_table_.get(),
840            module_->num_declared_functions * sizeof(WasmCode*));
841   }
842   code_table_ = std::move(new_table);
843 
844   base::AddressRegion single_code_space_region;
845   {
846     base::MutexGuard guard(&allocation_mutex_);
847     CHECK_EQ(1, code_space_data_.size());
848     single_code_space_region = code_space_data_[0].region;
849   }
850   // Re-allocate jump table.
851   main_jump_table_ = CreateEmptyJumpTableInRegion(
852       JumpTableAssembler::SizeForNumberOfSlots(max_functions),
853       single_code_space_region, WasmCodeAllocator::OptionalLock{});
854   base::MutexGuard guard(&allocation_mutex_);
855   code_space_data_[0].jump_table = main_jump_table_;
856 }
857 
858 void NativeModule::LogWasmCodes(Isolate* isolate) {
859   if (!WasmCode::ShouldBeLogged(isolate)) return;
860 
861   TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
862                module_->num_declared_functions);
863 
864   // TODO(titzer): we skip the logging of the import wrappers
865   // here, but they should be included somehow.
866   int start = module_->num_imported_functions;
867   int end = start + module_->num_declared_functions;
868   WasmCodeRefScope code_ref_scope;
869   for (int func_index = start; func_index < end; ++func_index) {
870     if (WasmCode* code = GetCode(func_index)) code->LogCode(isolate);
871   }
872 }
873 
874 CompilationEnv NativeModule::CreateCompilationEnv() const {
875   return {module(), use_trap_handler_, kRuntimeExceptionSupport,
876           enabled_features_, kNoLowerSimd};
877 }
878 
879 WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
880   CODE_SPACE_WRITE_SCOPE
881   const size_t relocation_size = code->relocation_size();
882   OwnedVector<byte> reloc_info;
883   if (relocation_size > 0) {
884     reloc_info = OwnedVector<byte>::Of(
885         Vector<byte>{code->relocation_start(), relocation_size});
886   }
887   Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
888                                      code->GetIsolate());
889   OwnedVector<byte> source_pos =
890       OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
891   if (source_pos_table->length() > 0) {
892     source_pos_table->copy_out(0, source_pos.start(),
893                                source_pos_table->length());
894   }
895   CHECK(!code->is_off_heap_trampoline());
896   STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
897   Vector<const byte> instructions(
898       reinterpret_cast<byte*>(code->raw_body_start()),
899       static_cast<size_t>(code->raw_body_size()));
900   const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
901 
902   // Metadata offsets in Code objects are relative to the start of the metadata
903   // section, whereas WasmCode expects offsets relative to InstructionStart.
904   const int base_offset = code->raw_instruction_size();
905   // TODO(jgruber,v8:8758): Remove this translation. It exists only because
906   // Code objects contains real offsets but WasmCode expects an offset of 0 to
907   // mean 'empty'.
908   const int safepoint_table_offset =
909       code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
910                                   : 0;
911   const int handler_table_offset = base_offset + code->handler_table_offset();
912   const int constant_pool_offset = base_offset + code->constant_pool_offset();
913   const int code_comments_offset = base_offset + code->code_comments_offset();
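  // Numeric illustration (not part of the original source): if
  // raw_instruction_size() is 0x400 and the Code object's safepoint table
  // offset is 0x20, the resulting WasmCode safepoint_table_offset is 0x420,
  // i.e. measured from InstructionStart rather than from the metadata start.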
914 
915   Vector<uint8_t> dst_code_bytes =
916       code_allocator_.AllocateForCode(this, instructions.size());
917   memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
918 
919   // Apply the relocation delta by iterating over the RelocInfo.
920   intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
921                    code->raw_instruction_start();
922   int mode_mask =
923       RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
924   auto jump_tables_ref =
925       FindJumpTablesForRegion(base::AddressRegionOf(dst_code_bytes));
926   Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
927   Address constant_pool_start = dst_code_addr + constant_pool_offset;
928   RelocIterator orig_it(*code, mode_mask);
929   for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
930                         constant_pool_start, mode_mask);
931        !it.done(); it.next(), orig_it.next()) {
932     RelocInfo::Mode mode = it.rinfo()->rmode();
933     if (RelocInfo::IsWasmStubCall(mode)) {
934       uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
935       DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
936       Address entry = GetNearRuntimeStubEntry(
937           static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
938       it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
939     } else {
940       it.rinfo()->apply(delta);
941     }
942   }
943 
944   // Flush the i-cache after relocation.
945   FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
946 
947   std::unique_ptr<WasmCode> new_code{
948       new WasmCode{this,                    // native_module
949                    kAnonymousFuncIndex,     // index
950                    dst_code_bytes,          // instructions
951                    stack_slots,             // stack_slots
952                    0,                       // tagged_parameter_slots
953                    safepoint_table_offset,  // safepoint_table_offset
954                    handler_table_offset,    // handler_table_offset
955                    constant_pool_offset,    // constant_pool_offset
956                    code_comments_offset,    // code_comments_offset
957                    instructions.length(),   // unpadded_binary_size
958                    {},                      // protected_instructions
959                    reloc_info.as_vector(),  // reloc_info
960                    source_pos.as_vector(),  // source positions
961                    WasmCode::kFunction,     // kind
962                    ExecutionTier::kNone,    // tier
963                    kNoDebugging}};          // for_debugging
964   new_code->MaybePrint();
965   new_code->Validate();
966 
967   return PublishCode(std::move(new_code));
968 }
969 
970 void NativeModule::UseLazyStub(uint32_t func_index) {
971   DCHECK_LE(module_->num_imported_functions, func_index);
972   DCHECK_LT(func_index,
973             module_->num_imported_functions + module_->num_declared_functions);
974 
975   if (!lazy_compile_table_) {
976     uint32_t num_slots = module_->num_declared_functions;
977     WasmCodeRefScope code_ref_scope;
978     CODE_SPACE_WRITE_SCOPE
979     base::AddressRegion single_code_space_region;
980     {
981       base::MutexGuard guard(&allocation_mutex_);
982       DCHECK_EQ(1, code_space_data_.size());
983       single_code_space_region = code_space_data_[0].region;
984     }
985     lazy_compile_table_ = CreateEmptyJumpTableInRegion(
986         JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
987         single_code_space_region, WasmCodeAllocator::OptionalLock{});
988     JumpTableAssembler::GenerateLazyCompileTable(
989         lazy_compile_table_->instruction_start(), num_slots,
990         module_->num_imported_functions,
991         GetNearRuntimeStubEntry(WasmCode::kWasmCompileLazy,
992                                 FindJumpTablesForRegion(base::AddressRegionOf(
993                                     lazy_compile_table_->instructions()))));
994   }
995 
996   // Add jump table entry for jump to the lazy compile stub.
997   uint32_t slot_index = declared_function_index(module(), func_index);
998   DCHECK_NULL(code_table_[slot_index]);
999   Address lazy_compile_target =
1000       lazy_compile_table_->instruction_start() +
1001       JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
1002   base::MutexGuard guard(&allocation_mutex_);
1003   PatchJumpTablesLocked(slot_index, lazy_compile_target);
1004 }
1005 
1006 std::unique_ptr<WasmCode> NativeModule::AddCode(
1007     int index, const CodeDesc& desc, int stack_slots,
1008     int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
1009     Vector<const byte> source_position_table, WasmCode::Kind kind,
1010     ExecutionTier tier, ForDebugging for_debugging) {
1011   Vector<byte> code_space =
1012       code_allocator_.AllocateForCode(this, desc.instr_size);
1013   auto jump_table_ref =
1014       FindJumpTablesForRegion(base::AddressRegionOf(code_space));
1015   return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
1016                               protected_instructions_data,
1017                               source_position_table, kind, tier, for_debugging,
1018                               code_space, jump_table_ref);
1019 }
1020 
1021 std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
1022     int index, const CodeDesc& desc, int stack_slots,
1023     int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
1024     Vector<const byte> source_position_table, WasmCode::Kind kind,
1025     ExecutionTier tier, ForDebugging for_debugging,
1026     Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
1027   Vector<byte> reloc_info{desc.buffer + desc.buffer_size - desc.reloc_size,
1028                           static_cast<size_t>(desc.reloc_size)};
1029   UpdateCodeSize(desc.instr_size, tier, for_debugging);
1030 
1031   // TODO(jgruber,v8:8758): Remove this translation. It exists only because
1032   // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
1033   // 'empty'.
1034   const int safepoint_table_offset =
1035       desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
1036   const int handler_table_offset = desc.handler_table_offset;
1037   const int constant_pool_offset = desc.constant_pool_offset;
1038   const int code_comments_offset = desc.code_comments_offset;
1039   const int instr_size = desc.instr_size;
1040 
1041   CODE_SPACE_WRITE_SCOPE
1042   memcpy(dst_code_bytes.begin(), desc.buffer,
1043          static_cast<size_t>(desc.instr_size));
1044 
1045   // Apply the relocation delta by iterating over the RelocInfo.
1046   intptr_t delta = dst_code_bytes.begin() - desc.buffer;
1047   int mode_mask = RelocInfo::kApplyMask |
1048                   RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
1049                   RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
1050   Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
1051   Address constant_pool_start = code_start + constant_pool_offset;
1052   for (RelocIterator it(dst_code_bytes, reloc_info, constant_pool_start,
1053                         mode_mask);
1054        !it.done(); it.next()) {
1055     RelocInfo::Mode mode = it.rinfo()->rmode();
1056     if (RelocInfo::IsWasmCall(mode)) {
1057       uint32_t call_tag = it.rinfo()->wasm_call_tag();
1058       Address target = GetNearCallTargetForFunction(call_tag, jump_tables);
1059       it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
1060     } else if (RelocInfo::IsWasmStubCall(mode)) {
1061       uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
1062       DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
1063       Address entry = GetNearRuntimeStubEntry(
1064           static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables);
1065       it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
1066     } else {
1067       it.rinfo()->apply(delta);
1068     }
1069   }
1070 
1071   // Flush the i-cache after relocation.
1072   FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
1073 
1074   // Liftoff code will not be relocated or serialized, thus do not store any
1075   // relocation information.
1076   if (tier == ExecutionTier::kLiftoff) reloc_info = {};
1077 
1078   std::unique_ptr<WasmCode> code{new WasmCode{
1079       this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
1080       safepoint_table_offset, handler_table_offset, constant_pool_offset,
1081       code_comments_offset, instr_size, protected_instructions_data, reloc_info,
1082       source_position_table, kind, tier, for_debugging}};
1083   code->MaybePrint();
1084   code->Validate();
1085 
1086   return code;
1087 }
1088 
1089 WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
1090   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
1091                "wasm.PublishCode");
1092   base::MutexGuard lock(&allocation_mutex_);
1093   return PublishCodeLocked(std::move(code));
1094 }
1095 
1096 std::vector<WasmCode*> NativeModule::PublishCode(
1097     Vector<std::unique_ptr<WasmCode>> codes) {
1098   TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
1099                "wasm.PublishCode", "number", codes.size());
1100   std::vector<WasmCode*> published_code;
1101   published_code.reserve(codes.size());
1102   base::MutexGuard lock(&allocation_mutex_);
1103   // The published code is put into the top-most surrounding {WasmCodeRefScope}.
1104   for (auto& code : codes) {
1105     published_code.push_back(PublishCodeLocked(std::move(code)));
1106   }
1107   return published_code;
1108 }
1109 
1110 WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
1111   switch (result.kind) {
1112     case WasmCompilationResult::kWasmToJsWrapper:
1113       return WasmCode::Kind::kWasmToJsWrapper;
1114     case WasmCompilationResult::kFunction:
1115       return WasmCode::Kind::kFunction;
1116     default:
1117       UNREACHABLE();
1118   }
1119 }
1120 
1121 WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
1122   // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
1123   DCHECK(!allocation_mutex_.TryLock());
1124 
1125   if (!code->IsAnonymous() &&
1126       code->index() >= module_->num_imported_functions) {
1127     DCHECK_LT(code->index(), num_functions());
1128 
1129     code->RegisterTrapHandlerData();
1130 
1131     // Assume an order of execution tiers that represents the quality of their
1132     // generated code.
1133     static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
1134                       ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
1135                   "Assume an order on execution tiers");
1136 
1137     uint32_t slot_idx = declared_function_index(module(), code->index());
1138     WasmCode* prior_code = code_table_[slot_idx];
1139     // If we are tiered down, install all debugging code (except for stepping
1140     // code, which is only used for a single frame and never installed in the
1141     // code table or jump table). Otherwise, install code if it was compiled
1142     // with a higher tier.
1143     static_assert(
1144         kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
1145         "for_debugging is ordered");
1146     const bool update_code_table =
1147         // Never install stepping code.
1148         code->for_debugging() != kForStepping &&
1149         (!prior_code ||
1150          (tiering_state_ == kTieredDown
1151               // Tiered down: Install breakpoints over normal debug code.
1152               ? prior_code->for_debugging() <= code->for_debugging()
1153               // Tiered up: Install if the tier is higher than before.
1154               : prior_code->tier() < code->tier()));
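    // Illustrative outcomes (not part of the original source): when tiering
    // up, Liftoff code never replaces already-installed TurboFan code, while
    // TurboFan code does replace Liftoff code; when tiered down for debugging,
    // code with breakpoints replaces plain debug code; stepping code is never
    // installed in the code table.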
1155     if (update_code_table) {
1156       code_table_[slot_idx] = code.get();
1157       if (prior_code) {
1158         WasmCodeRefScope::AddRef(prior_code);
1159         // The code is added to the current {WasmCodeRefScope}, hence the ref
1160         // count cannot drop to zero here.
1161         CHECK(!prior_code->DecRef());
1162       }
1163 
1164       PatchJumpTablesLocked(slot_idx, code->instruction_start());
1165     }
1166     if (!code->for_debugging() && tiering_state_ == kTieredDown &&
1167         code->tier() == ExecutionTier::kTurbofan) {
1168       liftoff_bailout_count_.fetch_add(1);
1169     }
1170   }
1171   WasmCodeRefScope::AddRef(code.get());
1172   WasmCode* result = code.get();
1173   owned_code_.emplace(result->instruction_start(), std::move(code));
1174   return result;
1175 }
1176 
1177 WasmCode* NativeModule::AddDeserializedCode(
1178     int index, Vector<const byte> instructions, int stack_slots,
1179     int tagged_parameter_slots, int safepoint_table_offset,
1180     int handler_table_offset, int constant_pool_offset,
1181     int code_comments_offset, int unpadded_binary_size,
1182     Vector<const byte> protected_instructions_data,
1183     Vector<const byte> reloc_info, Vector<const byte> source_position_table,
1184     WasmCode::Kind kind, ExecutionTier tier) {
1185   // CodeSpaceWriteScope is provided by the caller.
1186   Vector<uint8_t> dst_code_bytes =
1187       code_allocator_.AllocateForCode(this, instructions.size());
1188   UpdateCodeSize(dst_code_bytes.size(), tier, kNoDebugging);
1189   memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
1190 
1191   std::unique_ptr<WasmCode> code{new WasmCode{
1192       this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
1193       safepoint_table_offset, handler_table_offset, constant_pool_offset,
1194       code_comments_offset, unpadded_binary_size, protected_instructions_data,
1195       reloc_info, source_position_table, kind, tier, kNoDebugging}};
1196 
1197   // Note: we do not flush the i-cache here, since the code needs to be
1198   // relocated anyway. The caller is responsible for flushing the i-cache later.
1199 
1200   return PublishCode(std::move(code));
1201 }
1202 
1203 std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
1204   base::MutexGuard lock(&allocation_mutex_);
1205   WasmCode** start = code_table_.get();
1206   WasmCode** end = start + module_->num_declared_functions;
1207   return std::vector<WasmCode*>{start, end};
1208 }
1209 
1210 WasmCode* NativeModule::GetCode(uint32_t index) const {
1211   base::MutexGuard guard(&allocation_mutex_);
1212   WasmCode* code = code_table_[declared_function_index(module(), index)];
1213   if (code) WasmCodeRefScope::AddRef(code);
1214   return code;
1215 }
1216 
1217 bool NativeModule::HasCode(uint32_t index) const {
1218   base::MutexGuard guard(&allocation_mutex_);
1219   return code_table_[declared_function_index(module(), index)] != nullptr;
1220 }
1221 
1222 bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
1223   base::MutexGuard guard(&allocation_mutex_);
1224   return code_table_[declared_function_index(module(), index)] != nullptr &&
1225          code_table_[declared_function_index(module(), index)]->tier() == tier;
1226 }
1227 
1228 void NativeModule::SetWasmSourceMap(
1229     std::unique_ptr<WasmModuleSourceMap> source_map) {
1230   source_map_ = std::move(source_map);
1231 }
1232 
1233 WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
1234   return source_map_.get();
1235 }
1236 
1237 WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
1238     int jump_table_size, base::AddressRegion region,
1239     const WasmCodeAllocator::OptionalLock& allocator_lock) {
1240   // Only call this if we really need a jump table.
1241   DCHECK_LT(0, jump_table_size);
1242   Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
1243       this, jump_table_size, region, allocator_lock);
1244   DCHECK(!code_space.empty());
1245   UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
1246   CODE_SPACE_WRITE_SCOPE
1247   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
1248   std::unique_ptr<WasmCode> code{
1249       new WasmCode{this,                  // native_module
1250                    kAnonymousFuncIndex,   // index
1251                    code_space,            // instructions
1252                    0,                     // stack_slots
1253                    0,                     // tagged_parameter_slots
1254                    0,                     // safepoint_table_offset
1255                    jump_table_size,       // handler_table_offset
1256                    jump_table_size,       // constant_pool_offset
1257                    jump_table_size,       // code_comments_offset
1258                    jump_table_size,       // unpadded_binary_size
1259                    {},                    // protected_instructions
1260                    {},                    // reloc_info
1261                    {},                    // source_pos
1262                    WasmCode::kJumpTable,  // kind
1263                    ExecutionTier::kNone,  // tier
1264                    kNoDebugging}};        // for_debugging
1265   return PublishCode(std::move(code));
1266 }
1267 
1268 void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
1269                                   ForDebugging for_debugging) {
1270   if (for_debugging != kNoDebugging) return;
1271   // Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
1272   // this is shared code.
1273   if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
1274   if (tier != ExecutionTier::kLiftoff) turbofan_code_size_.fetch_add(size);
1275 }
1276 
1277 void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
1278   // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
1279   DCHECK(!allocation_mutex_.TryLock());
1280 
1281   CODE_SPACE_WRITE_SCOPE
1282   for (auto& code_space_data : code_space_data_) {
1283     DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
1284     if (!code_space_data.jump_table) continue;
1285     PatchJumpTableLocked(code_space_data, slot_index, target);
1286   }
1287 }
1288 
1289 void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
1290                                         uint32_t slot_index, Address target) {
1291   // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
1292   DCHECK(!allocation_mutex_.TryLock());
1293 
1294   DCHECK_NOT_NULL(code_space_data.jump_table);
1295   DCHECK_NOT_NULL(code_space_data.far_jump_table);
1296 
1297   DCHECK_LT(slot_index, module_->num_declared_functions);
1298   Address jump_table_slot =
1299       code_space_data.jump_table->instruction_start() +
1300       JumpTableAssembler::JumpSlotIndexToOffset(slot_index);
1301   uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
1302       WasmCode::kRuntimeStubCount + slot_index);
1303   // Only pass the far jump table start if the far jump table actually has a
1304   // slot for this function index (i.e. does not only contain runtime stubs).
1305   bool has_far_jump_slot =
1306       far_jump_table_offset <
1307       code_space_data.far_jump_table->instructions().size();
1308   Address far_jump_table_start =
1309       code_space_data.far_jump_table->instruction_start();
1310   Address far_jump_table_slot =
1311       has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
1312                         : kNullAddress;
1313   JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot,
1314                                          target);
1315 }
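// Illustration (hedged sketch, not part of the original file): the far jump
// table stores the WasmCode::kRuntimeStubCount runtime stub slots first and the
// per-function slots behind them, so the far slot for the declared function at
// {slot_index} is resolved roughly as follows:
//
//   uint32_t offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
//       WasmCode::kRuntimeStubCount + slot_index);
//   bool has_slot = offset < far_jump_table->instructions().size();
//   Address far_slot = has_slot
//                          ? far_jump_table->instruction_start() + offset
//                          : kNullAddress;  // table holds only runtime stubs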
1316 
1317 void NativeModule::AddCodeSpace(
1318     base::AddressRegion region,
1319     const WasmCodeAllocator::OptionalLock& allocator_lock) {
1320   // Each code space must be at least twice as large as the overhead per code
1321   // space. Otherwise, we are wasting too much memory.
1322   DCHECK_GE(region.size(),
1323             2 * OverheadPerCodeSpace(module()->num_declared_functions));
1324 
1325 #if defined(V8_OS_WIN64)
1326   // On some platforms, specifically Win64, we need to reserve some pages at
1327   // the beginning of an executable space.
1328   // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
1329   // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
1330   // for details.
1331   if (engine_->code_manager()
1332           ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
1333     size_t size = Heap::GetCodeRangeReservedAreaSize();
1334     DCHECK_LT(0, size);
1335     Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
1336         this, size, region, allocator_lock);
1337     CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
1338     win64_unwindinfo::RegisterNonABICompliantCodeRange(
1339         reinterpret_cast<void*>(region.begin()), region.size());
1340   }
1341 #endif  // V8_OS_WIN64
1342 
1343   WasmCodeRefScope code_ref_scope;
1344   CODE_SPACE_WRITE_SCOPE
1345   WasmCode* jump_table = nullptr;
1346   WasmCode* far_jump_table = nullptr;
1347   const uint32_t num_wasm_functions = module_->num_declared_functions;
1348   const bool is_first_code_space = code_space_data_.empty();
1349   // We always need a far jump table, because it contains the runtime stubs.
1350   const bool needs_far_jump_table = !FindJumpTablesForRegion(region).is_valid();
1351   const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
1352 
1353   if (needs_jump_table) {
1354     jump_table = CreateEmptyJumpTableInRegion(
1355         JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
1356         allocator_lock);
1357     CHECK(region.contains(jump_table->instruction_start()));
1358   }
1359 
1360   if (needs_far_jump_table) {
1361     int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
1362     far_jump_table = CreateEmptyJumpTableInRegion(
1363         JumpTableAssembler::SizeForNumberOfFarJumpSlots(
1364             WasmCode::kRuntimeStubCount,
1365             NumWasmFunctionsInFarJumpTable(num_function_slots)),
1366         region, allocator_lock);
1367     CHECK(region.contains(far_jump_table->instruction_start()));
1368     EmbeddedData embedded_data = EmbeddedData::FromBlob();
1369 #define RUNTIME_STUB(Name) Builtins::k##Name,
1370 #define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
1371     Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
1372         WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
1373 #undef RUNTIME_STUB
1374 #undef RUNTIME_STUB_TRAP
1375     STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
1376     Address builtin_addresses[WasmCode::kRuntimeStubCount];
1377     for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
1378       Builtins::Name builtin = stub_names[i];
1379       builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
1380     }
1381     JumpTableAssembler::GenerateFarJumpTable(
1382         far_jump_table->instruction_start(), builtin_addresses,
1383         WasmCode::kRuntimeStubCount, num_function_slots);
1384   }
1385 
1386   if (is_first_code_space) {
1387     // This can be updated and accessed without locks, since the addition of the
1388     // first code space happens during initialization of the {NativeModule},
1389     // where no concurrent accesses are possible.
1390     main_jump_table_ = jump_table;
1391     main_far_jump_table_ = far_jump_table;
1392   }
1393 
1394   base::MutexGuard guard(&allocation_mutex_);
1395   code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
1396 
1397   if (jump_table && !is_first_code_space) {
1398     // Patch the new jump table(s) with existing functions. If this is the first
1399     // code space, there cannot be any functions that have been compiled yet.
1400     const CodeSpaceData& new_code_space_data = code_space_data_.back();
1401     for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
1402          ++slot_index) {
1403       if (code_table_[slot_index]) {
1404         PatchJumpTableLocked(new_code_space_data, slot_index,
1405                              code_table_[slot_index]->instruction_start());
1406       } else if (lazy_compile_table_) {
1407         Address lazy_compile_target =
1408             lazy_compile_table_->instruction_start() +
1409             JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
1410         PatchJumpTableLocked(new_code_space_data, slot_index,
1411                              lazy_compile_target);
1412       }
1413     }
1414   }
1415 }
1416 
1417 namespace {
1418 class NativeModuleWireBytesStorage final : public WireBytesStorage {
1419  public:
1420   explicit NativeModuleWireBytesStorage(
1421       std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes)
1422       : wire_bytes_(std::move(wire_bytes)) {}
1423 
1424   Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
1425     return std::atomic_load(&wire_bytes_)
1426         ->as_vector()
1427         .SubVector(ref.offset(), ref.end_offset());
1428   }
1429 
1430  private:
1431   const std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
1432 };
1433 }  // namespace
1434 
1435 void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
1436   auto shared_wire_bytes =
1437       std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
1438   std::atomic_store(&wire_bytes_, shared_wire_bytes);
1439   if (!shared_wire_bytes->empty()) {
1440     compilation_state_->SetWireBytesStorage(
1441         std::make_shared<NativeModuleWireBytesStorage>(
1442             std::move(shared_wire_bytes)));
1443   }
1444 }
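// Illustration (hedged sketch): {wire_bytes_} is published through the
// shared_ptr atomic free functions, so a concurrent reader (e.g. the GetCode()
// override above) always sees either the old or the new bytes, never a torn
// pointer:
//
//   std::atomic_store(&wire_bytes_, new_bytes);      // writer publishes
//   auto snapshot = std::atomic_load(&wire_bytes_);  // reader pins a reference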
1445 
1446 WasmCode* NativeModule::Lookup(Address pc) const {
1447   base::MutexGuard lock(&allocation_mutex_);
1448   auto iter = owned_code_.upper_bound(pc);
1449   if (iter == owned_code_.begin()) return nullptr;
1450   --iter;
1451   WasmCode* candidate = iter->second.get();
1452   DCHECK_EQ(candidate->instruction_start(), iter->first);
1453   if (!candidate->contains(pc)) return nullptr;
1454   WasmCodeRefScope::AddRef(candidate);
1455   return candidate;
1456 }
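// Illustration (hedged sketch): {owned_code_} is keyed by instruction start
// address, so the owner of an arbitrary {pc} is found with the usual
// "upper_bound, then step back one entry" map idiom used above:
//
//   auto it = owned_code_.upper_bound(pc);  // first entry starting after pc
//   if (it == owned_code_.begin()) ...      // pc lies below every code object
//   --it;                                   // last entry starting at or below pc
//   bool hit = it->second->contains(pc);    // explicit range check still needed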
1457 
1458 uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
1459   uint32_t slot_idx = declared_function_index(module(), func_index);
1460   return JumpTableAssembler::JumpSlotIndexToOffset(slot_idx);
1461 }
1462 
1463 Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
1464   // Return the jump table slot for that function index.
1465   DCHECK_NOT_NULL(main_jump_table_);
1466   uint32_t slot_offset = GetJumpTableOffset(func_index);
1467   DCHECK_LT(slot_offset, main_jump_table_->instructions().size());
1468   return main_jump_table_->instruction_start() + slot_offset;
1469 }
1470 
1471 NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
1472     base::AddressRegion code_region) const {
1473   auto jump_table_usable = [code_region](const WasmCode* jump_table) {
1474     Address table_start = jump_table->instruction_start();
1475     Address table_end = table_start + jump_table->instructions().size();
1476     // Compute the maximum distance from anywhere in the code region to anywhere
1477     // in the jump table, avoiding any underflow.
1478     size_t max_distance = std::max(
1479         code_region.end() > table_start ? code_region.end() - table_start : 0,
1480         table_end > code_region.begin() ? table_end - code_region.begin() : 0);
1481     // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
1482     // every call or jump will target an address *within* the region, but never
1483     // exactly the end of the region. So all occurring offsets are actually
1484     // smaller than max_distance.
1485     return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
1486   };
1487 
1488   // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
1489   // Access to these fields is possible without locking, since these fields are
1490   // initialized on construction of the {NativeModule}.
1491   if (main_far_jump_table_ && jump_table_usable(main_far_jump_table_) &&
1492       (main_jump_table_ == nullptr || jump_table_usable(main_jump_table_))) {
1493     return {
1494         main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress,
1495         main_far_jump_table_->instruction_start()};
1496   }
1497 
1498   // Otherwise, take the mutex and look for another suitable jump table.
1499   base::MutexGuard guard(&allocation_mutex_);
1500   for (auto& code_space_data : code_space_data_) {
1501     DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
1502     if (!code_space_data.far_jump_table) continue;
1503     // Only return these jump tables if they are reachable from the whole
1504     // {code_region}.
1505     if (kNeedsFarJumpsBetweenCodeSpaces &&
1506         (!jump_table_usable(code_space_data.far_jump_table) ||
1507          (code_space_data.jump_table &&
1508           !jump_table_usable(code_space_data.jump_table)))) {
1509       continue;
1510     }
1511     return {code_space_data.jump_table
1512                 ? code_space_data.jump_table->instruction_start()
1513                 : kNullAddress,
1514             code_space_data.far_jump_table->instruction_start()};
1515   }
1516   return {};
1517 }
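// Worked example (hedged, made-up addresses): for a code region
// [0x1000, 0x5000) and a jump table spanning [0x8000, 0x8100), the first term
// clamps to 0 (the region ends below the table start) and the second term is
// 0x8100 - 0x1000 = 0x7100; the table is usable iff this maximum distance does
// not exceed WasmCodeAllocator::kMaxCodeSpaceSize.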
1518 
1519 Address NativeModule::GetNearCallTargetForFunction(
1520     uint32_t func_index, const JumpTablesRef& jump_tables) const {
1521   DCHECK(jump_tables.is_valid());
1522   uint32_t slot_offset = GetJumpTableOffset(func_index);
1523   return jump_tables.jump_table_start + slot_offset;
1524 }
1525 
1526 Address NativeModule::GetNearRuntimeStubEntry(
1527     WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
1528   DCHECK(jump_tables.is_valid());
1529   auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
1530   return jump_tables.far_jump_table_start + offset;
1531 }
1532 
1533 uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
1534     Address slot_address) const {
1535   WasmCodeRefScope code_refs;
1536   WasmCode* code = Lookup(slot_address);
1537   DCHECK_NOT_NULL(code);
1538   DCHECK_EQ(WasmCode::kJumpTable, code->kind());
1539   uint32_t slot_offset =
1540       static_cast<uint32_t>(slot_address - code->instruction_start());
1541   uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
1542   DCHECK_LT(slot_idx, module_->num_declared_functions);
1543   DCHECK_EQ(slot_address,
1544             code->instruction_start() +
1545                 JumpTableAssembler::JumpSlotIndexToOffset(slot_idx));
1546   return module_->num_imported_functions + slot_idx;
1547 }
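// Example (hedged): jump table slots are indexed by declared function, so in a
// module with 3 imported functions, slot 0 maps back to function index 3.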
1548 
1549 WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
1550   base::MutexGuard guard(&allocation_mutex_);
1551 
1552   for (auto& code_space_data : code_space_data_) {
1553     if (code_space_data.far_jump_table != nullptr &&
1554         code_space_data.far_jump_table->contains(target)) {
1555       uint32_t offset = static_cast<uint32_t>(
1556           target - code_space_data.far_jump_table->instruction_start());
1557       uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset);
1558       if (index >= WasmCode::kRuntimeStubCount) continue;
1559       if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) {
1560         continue;
1561       }
1562       return static_cast<WasmCode::RuntimeStubId>(index);
1563     }
1564   }
1565 
1566   // Invalid address.
1567   return WasmCode::kRuntimeStubCount;
1568 }
1569 
1570 NativeModule::~NativeModule() {
1571   TRACE_HEAP("Deleting native module: %p\n", this);
1572   // Cancel all background compilation before resetting any field of the
1573   // NativeModule or freeing anything.
1574   compilation_state_->CancelCompilation();
1575   engine_->FreeNativeModule(this);
1576   // Free the import wrapper cache before releasing the {WasmCode} objects in
1577   // {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
1578   // decrease reference counts on the {WasmCode} objects.
1579   import_wrapper_cache_.reset();
1580 }
1581 
1582 WasmCodeManager::WasmCodeManager(size_t max_committed)
1583     : max_committed_code_space_(max_committed),
1584       critical_committed_code_space_(max_committed / 2) {
1585   DCHECK_LE(max_committed, FLAG_wasm_max_code_space * MB);
1586 }
1587 
1588 #if defined(V8_OS_WIN64)
1589 bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
1590   return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
1591          FLAG_win64_unwinding_info;
1592 }
1593 #endif  // V8_OS_WIN64
1594 
1595 void WasmCodeManager::Commit(base::AddressRegion region) {
1596   // TODO(v8:8462): Remove eager commit once perf supports remapping.
1597   if (V8_UNLIKELY(FLAG_perf_prof)) return;
1598   DCHECK(IsAligned(region.begin(), CommitPageSize()));
1599   DCHECK(IsAligned(region.size(), CommitPageSize()));
1600   // Reserve the size. Use CAS loop to avoid overflow on
1601   // {total_committed_code_space_}.
1602   size_t old_value = total_committed_code_space_.load();
1603   while (true) {
1604     DCHECK_GE(max_committed_code_space_, old_value);
1605     if (region.size() > max_committed_code_space_ - old_value) {
1606       V8::FatalProcessOutOfMemory(
1607           nullptr,
1608           "WasmCodeManager::Commit: Exceeding maximum wasm code space");
1609       UNREACHABLE();
1610     }
1611     if (total_committed_code_space_.compare_exchange_weak(
1612             old_value, old_value + region.size())) {
1613       break;
1614     }
1615   }
1616   PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
1617                                              ? PageAllocator::kReadWrite
1618                                              : PageAllocator::kReadWriteExecute;
1619 
1620   TRACE_HEAP("Setting rw permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
1621              region.begin(), region.end());
1622 
1623   if (!SetPermissions(GetPlatformPageAllocator(), region.begin(), region.size(),
1624                       permission)) {
1625     // Highly unlikely.
1626     V8::FatalProcessOutOfMemory(
1627         nullptr,
1628         "WasmCodeManager::Commit: Cannot make pre-reserved region writable");
1629     UNREACHABLE();
1630   }
1631 }
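// Illustration (hedged sketch): the reservation above is the usual CAS loop for
// "add with an upper bound" on an atomic counter:
//
//   size_t old_value = counter.load();
//   while (true) {
//     if (delta > limit - old_value) /* fail: budget exceeded */;
//     if (counter.compare_exchange_weak(old_value, old_value + delta)) break;
//     // On failure, compare_exchange_weak reloads the current counter value
//     // into {old_value}, so the loop retries with fresh data.
//   }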
1632 
1633 void WasmCodeManager::Decommit(base::AddressRegion region) {
1634   // TODO(v8:8462): Remove this once perf supports remapping.
1635   if (V8_UNLIKELY(FLAG_perf_prof)) return;
1636   PageAllocator* allocator = GetPlatformPageAllocator();
1637   DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
1638   DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
1639   size_t old_committed = total_committed_code_space_.fetch_sub(region.size());
1640   DCHECK_LE(region.size(), old_committed);
1641   USE(old_committed);
1642   TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
1643              region.begin(), region.end());
1644   CHECK(allocator->SetPermissions(reinterpret_cast<void*>(region.begin()),
1645                                   region.size(), PageAllocator::kNoAccess));
1646 }
1647 
1648 void WasmCodeManager::AssignRange(base::AddressRegion region,
1649                                   NativeModule* native_module) {
1650   base::MutexGuard lock(&native_modules_mutex_);
1651   lookup_map_.insert(std::make_pair(
1652       region.begin(), std::make_pair(region.end(), native_module)));
1653 }
1654 
1655 VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
1656   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
1657   DCHECK_GT(size, 0);
1658   size_t allocate_page_size = page_allocator->AllocatePageSize();
1659   size = RoundUp(size, allocate_page_size);
1660   if (!BackingStore::ReserveAddressSpace(size)) return {};
1661   if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
1662 
1663   // Once Wasm is exposed in jitless mode, the jitless flag will have to
1664   // determine whether we set kMapAsJittable or not.
1665   DCHECK(!FLAG_jitless);
1666   VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
1667                     VirtualMemory::kMapAsJittable);
1668   if (!mem.IsReserved()) {
1669     BackingStore::ReleaseReservation(size);
1670     return {};
1671   }
1672   TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
1673              mem.end(), mem.size());
1674 
1675   // TODO(v8:8462): Remove eager commit once perf supports remapping.
1676   if (FLAG_perf_prof) {
1677     SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
1678                    PageAllocator::kReadWriteExecute);
1679   }
1680   return mem;
1681 }
1682 
1683 namespace {
1684 // The numbers here are rough estimates, used to calculate the size of the
1685 // initial code reservation and for estimating the amount of external memory
1686 // reported to the GC.
1687 // They do not need to be accurate. Choosing them too small will result in
1688 // separate code spaces being allocated (compile time and runtime overhead),
1689 // choosing them too large results in over-reservation (virtual address space
1690 // only).
1691 // The current numbers were determined on 2019-11-11 by clemensb@, based
1692 // on one small and one large module compiled from C++ by Emscripten. If in
1693 // doubt, they were chosen slightly larger than required, as over-reservation
1694 // is not a big issue currently.
1695 // Numbers will change when Liftoff or TurboFan evolve, other toolchains are
1696 // used to produce the wasm code, or characteristics of wasm modules on the
1697 // web change. They might require occasional tuning.
1698 // This patch might help to find reasonable numbers for any future adaptation:
1699 // https://crrev.com/c/1910945
1700 #if V8_TARGET_ARCH_X64
1701 constexpr size_t kTurbofanFunctionOverhead = 20;
1702 constexpr size_t kTurbofanCodeSizeMultiplier = 3;
1703 constexpr size_t kLiftoffFunctionOverhead = 60;
1704 constexpr size_t kLiftoffCodeSizeMultiplier = 4;
1705 constexpr size_t kImportSize = 350;
1706 #elif V8_TARGET_ARCH_IA32
1707 constexpr size_t kTurbofanFunctionOverhead = 20;
1708 constexpr size_t kTurbofanCodeSizeMultiplier = 4;
1709 constexpr size_t kLiftoffFunctionOverhead = 60;
1710 constexpr size_t kLiftoffCodeSizeMultiplier = 5;
1711 constexpr size_t kImportSize = 480;
1712 #elif V8_TARGET_ARCH_ARM
1713 constexpr size_t kTurbofanFunctionOverhead = 40;
1714 constexpr size_t kTurbofanCodeSizeMultiplier = 4;
1715 constexpr size_t kLiftoffFunctionOverhead = 108;
1716 constexpr size_t kLiftoffCodeSizeMultiplier = 7;
1717 constexpr size_t kImportSize = 750;
1718 #elif V8_TARGET_ARCH_ARM64
1719 constexpr size_t kTurbofanFunctionOverhead = 60;
1720 constexpr size_t kTurbofanCodeSizeMultiplier = 4;
1721 constexpr size_t kLiftoffFunctionOverhead = 80;
1722 constexpr size_t kLiftoffCodeSizeMultiplier = 7;
1723 constexpr size_t kImportSize = 750;
1724 #else
1725 // Other platforms should add their own estimates if needed. Numbers below are
1726 // the minimum of other architectures.
1727 constexpr size_t kTurbofanFunctionOverhead = 20;
1728 constexpr size_t kTurbofanCodeSizeMultiplier = 3;
1729 constexpr size_t kLiftoffFunctionOverhead = 60;
1730 constexpr size_t kLiftoffCodeSizeMultiplier = 4;
1731 constexpr size_t kImportSize = 350;
1732 #endif
1733 }  // namespace
1734 
1735 // static
1736 size_t WasmCodeManager::EstimateLiftoffCodeSize(int body_size) {
1737   return kLiftoffFunctionOverhead + kCodeAlignment / 2 +
1738          body_size * kLiftoffCodeSizeMultiplier;
1739 }
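// Example (hedged, x64 constants from the namespace above): a wasm function
// with a 1000 byte body is estimated at
//   kLiftoffFunctionOverhead (60) + kCodeAlignment / 2 + 1000 * 4
// bytes of Liftoff code; the constants are deliberately rough upper bounds.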
1740 
1741 // static
1742 size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module,
1743                                                      bool include_liftoff) {
1744   int num_functions = static_cast<int>(module->num_declared_functions);
1745   int num_imported_functions = static_cast<int>(module->num_imported_functions);
1746   int code_section_length = 0;
1747   if (num_functions > 0) {
1748     DCHECK_EQ(module->functions.size(), num_imported_functions + num_functions);
1749     auto* first_fn = &module->functions[module->num_imported_functions];
1750     auto* last_fn = &module->functions.back();
1751     code_section_length =
1752         static_cast<int>(last_fn->code.end_offset() - first_fn->code.offset());
1753   }
1754   return EstimateNativeModuleCodeSize(num_functions, num_imported_functions,
1755                                       code_section_length, include_liftoff);
1756 }
1757 
1758 // static
1759 size_t WasmCodeManager::EstimateNativeModuleCodeSize(int num_functions,
1760                                                      int num_imported_functions,
1761                                                      int code_section_length,
1762                                                      bool include_liftoff) {
1763   const size_t overhead_per_function =
1764       kTurbofanFunctionOverhead + kCodeAlignment / 2 +
1765       (include_liftoff ? kLiftoffFunctionOverhead + kCodeAlignment / 2 : 0);
1766   const size_t overhead_per_code_byte =
1767       kTurbofanCodeSizeMultiplier +
1768       (include_liftoff ? kLiftoffCodeSizeMultiplier : 0);
1769   const size_t jump_table_size = RoundUp<kCodeAlignment>(
1770       JumpTableAssembler::SizeForNumberOfSlots(num_functions));
1771   const size_t far_jump_table_size =
1772       RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
1773           WasmCode::kRuntimeStubCount,
1774           NumWasmFunctionsInFarJumpTable(num_functions)));
1775   return jump_table_size                                 // jump table
1776          + far_jump_table_size                           // far jump table
1777          + overhead_per_function * num_functions         // per function
1778          + overhead_per_code_byte * code_section_length  // per code byte
1779          + kImportSize * num_imported_functions;         // per import
1780 }
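// Worked example (hedged, x64 constants, include_liftoff == true): a module
// with 100 declared functions, 10 imports and a 50000 byte code section is
// estimated at roughly
//   jump_table_size + far_jump_table_size   // both rounded up to kCodeAlignment
//   + 100 * (20 + 60 + kCodeAlignment)      // per-function overhead, both tiers
//   + 50000 * (3 + 4)                       // per code byte, both tiers
//   + 10 * 350                              // per imported function
// bytes; as the comment above notes, only the order of magnitude matters.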
1781 
1782 // static
1783 size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
1784     const WasmModule* module) {
1785   size_t wasm_module_estimate = EstimateStoredSize(module);
1786 
1787   uint32_t num_wasm_functions = module->num_declared_functions;
1788 
1789   // TODO(wasm): Include wire bytes size.
1790   size_t native_module_estimate =
1791       sizeof(NativeModule) +                     /* NativeModule struct */
1792       (sizeof(WasmCode*) * num_wasm_functions) + /* code table size */
1793       (sizeof(WasmCode) * num_wasm_functions);   /* code object size */
1794 
1795   return wasm_module_estimate + native_module_estimate;
1796 }
1797 
1798 std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
1799     WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
1800     size_t code_size_estimate, std::shared_ptr<const WasmModule> module) {
1801   DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
1802   if (total_committed_code_space_.load() >
1803       critical_committed_code_space_.load()) {
1804     (reinterpret_cast<v8::Isolate*>(isolate))
1805         ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
1806     size_t committed = total_committed_code_space_.load();
1807     DCHECK_GE(max_committed_code_space_, committed);
1808     critical_committed_code_space_.store(
1809         committed + (max_committed_code_space_ - committed) / 2);
1810   }
1811 
1812   // If we cannot add code space later, reserve enough address space up front.
1813   size_t code_vmem_size =
1814       ReservationSize(code_size_estimate, module->num_declared_functions, 0);
1815 
1816   // The '--wasm-max-initial-code-space-reservation' testing flag can be used
1817   // to reduce the maximum size of the initial code space reservation (in MB).
1818   if (FLAG_wasm_max_initial_code_space_reservation > 0) {
1819     size_t flag_max_bytes =
1820         static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
1821     if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
1822   }
1823 
1824   // Try up to two times; getting rid of dead JSArrayBuffer allocations might
1825   // require two GCs because the first GC may be incremental and may have
1826   // floating garbage.
1827   static constexpr int kAllocationRetries = 2;
1828   VirtualMemory code_space;
1829   for (int retries = 0;; ++retries) {
1830     code_space = TryAllocate(code_vmem_size);
1831     if (code_space.IsReserved()) break;
1832     if (retries == kAllocationRetries) {
1833       V8::FatalProcessOutOfMemory(isolate, "NewNativeModule");
1834       UNREACHABLE();
1835     }
1836     // Run one GC, then try the allocation again.
1837     isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
1838                                                 true);
1839   }
1840 
1841   Address start = code_space.address();
1842   size_t size = code_space.size();
1843   Address end = code_space.end();
1844   std::shared_ptr<NativeModule> ret;
1845   new NativeModule(engine, enabled, std::move(code_space), std::move(module),
1846                    isolate->async_counters(), &ret);
1847   // The constructor initialized the shared_ptr.
1848   DCHECK_NOT_NULL(ret);
1849   TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
1850              size);
1851 
1852   base::MutexGuard lock(&native_modules_mutex_);
1853   lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
1854   return ret;
1855 }
1856 
1857 void NativeModule::SampleCodeSize(
1858     Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
1859   size_t code_size = sampling_time == kSampling
1860                          ? code_allocator_.committed_code_space()
1861                          : code_allocator_.generated_code_size();
1862   int code_size_mb = static_cast<int>(code_size / MB);
1863   Histogram* histogram = nullptr;
1864   switch (sampling_time) {
1865     case kAfterBaseline:
1866       histogram = counters->wasm_module_code_size_mb_after_baseline();
1867       break;
1868     case kAfterTopTier:
1869       histogram = counters->wasm_module_code_size_mb_after_top_tier();
1870       break;
1871     case kSampling: {
1872       histogram = counters->wasm_module_code_size_mb();
1873       // If this is a wasm module of >= 2MB, also sample the freed code size,
1874       // absolute and relative. Code GC does not happen on asm.js modules, and
1875       // small modules will never trigger GC anyway.
1876       size_t generated_size = code_allocator_.generated_code_size();
1877       if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
1878         size_t freed_size = code_allocator_.freed_code_size();
1879         DCHECK_LE(freed_size, generated_size);
1880         int freed_percent = static_cast<int>(100 * freed_size / generated_size);
1881         counters->wasm_module_freed_code_size_percent()->AddSample(
1882             freed_percent);
1883       }
1884       break;
1885     }
1886   }
1887   histogram->AddSample(code_size_mb);
1888 }
1889 
1890 std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
1891     WasmCompilationResult result) {
1892   std::vector<std::unique_ptr<WasmCode>> code = AddCompiledCode({&result, 1});
1893   return std::move(code[0]);
1894 }
1895 
1896 std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
1897     Vector<WasmCompilationResult> results) {
1898   TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
1899                "wasm.AddCompiledCode", "num", results.size());
1900   DCHECK(!results.empty());
1901   // First, allocate code space for all the results.
1902   size_t total_code_space = 0;
1903   for (auto& result : results) {
1904     DCHECK(result.succeeded());
1905     total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
1906   }
1907   Vector<byte> code_space =
1908       code_allocator_.AllocateForCode(this, total_code_space);
1909   // Lookup the jump tables to use once, then use for all code objects.
1910   auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));
1911 
1912   std::vector<std::unique_ptr<WasmCode>> generated_code;
1913   generated_code.reserve(results.size());
1914 
1915   // Now copy the generated code into the code space and relocate it.
1916   CODE_SPACE_WRITE_SCOPE
1917   for (auto& result : results) {
1918     DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
1919     size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
1920     Vector<byte> this_code_space = code_space.SubVector(0, code_size);
1921     code_space += code_size;
1922     generated_code.emplace_back(AddCodeWithCodeSpace(
1923         result.func_index, result.code_desc, result.frame_slot_count,
1924         result.tagged_parameter_slots,
1925         result.protected_instructions_data.as_vector(),
1926         result.source_positions.as_vector(), GetCodeKind(result),
1927         result.result_tier, result.for_debugging, this_code_space,
1928         jump_tables));
1929   }
1930   DCHECK_EQ(0, code_space.size());
1931 
1932   return generated_code;
1933 }
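// Illustration (hedged sketch): the loop above carves the single allocation
// into per-function chunks from the front:
//
//   Vector<byte> space = code_allocator_.AllocateForCode(this, total);
//   Vector<byte> chunk = space.SubVector(0, chunk_size);  // first chunk_size bytes
//   space += chunk_size;                                  // drop them from the front
//   // ... repeat until space.size() == 0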
1934 
1935 void NativeModule::SetTieringState(TieringState new_tiering_state) {
1936   // Do not tier down asm.js (just never change the tiering state).
1937   if (module()->origin != kWasmOrigin) return;
1938 
1939   base::MutexGuard lock(&allocation_mutex_);
1940   tiering_state_ = new_tiering_state;
1941 }
1942 
1943 bool NativeModule::IsTieredDown() {
1944   base::MutexGuard lock(&allocation_mutex_);
1945   return tiering_state_ == kTieredDown;
1946 }
1947 
1948 void NativeModule::RecompileForTiering() {
1949   // Read the tiering state under the lock, then trigger recompilation after
1950   // releasing the lock. If the tiering state was changed when the triggered
1951   // compilation units finish, code installation will handle that correctly.
1952   TieringState current_state;
1953   {
1954     base::MutexGuard lock(&allocation_mutex_);
1955     current_state = tiering_state_;
1956   }
1957   RecompileNativeModule(this, current_state);
1958 }
1959 
1960 std::vector<int> NativeModule::FindFunctionsToRecompile(
1961     TieringState new_tiering_state) {
1962   base::MutexGuard guard(&allocation_mutex_);
1963   std::vector<int> function_indexes;
1964   int imported = module()->num_imported_functions;
1965   int declared = module()->num_declared_functions;
1966   for (int slot_index = 0; slot_index < declared; ++slot_index) {
1967     int function_index = imported + slot_index;
1968     WasmCode* code = code_table_[slot_index];
1969     bool code_is_good = new_tiering_state == kTieredDown
1970                             ? code && code->for_debugging()
1971                             : code && code->tier() == ExecutionTier::kTurbofan;
1972     if (!code_is_good) function_indexes.push_back(function_index);
1973   }
1974   return function_indexes;
1975 }
1976 
1977 void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
1978   // Free the code space.
1979   code_allocator_.FreeCode(codes);
1980 
1981   DebugInfo* debug_info = nullptr;
1982   {
1983     base::MutexGuard guard(&allocation_mutex_);
1984     debug_info = debug_info_.get();
1985     // Free the {WasmCode} objects. This will also unregister trap handler data.
1986     for (WasmCode* code : codes) {
1987       DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
1988       owned_code_.erase(code->instruction_start());
1989     }
1990   }
1991   // Remove debug side tables for all removed code objects, after releasing our
1992   // lock. This is to avoid lock order inversion.
1993   if (debug_info) debug_info->RemoveDebugSideTables(codes);
1994 }
1995 
1996 size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
1997   return code_allocator_.GetNumCodeSpaces();
1998 }
1999 
2000 bool NativeModule::HasDebugInfo() const {
2001   base::MutexGuard guard(&allocation_mutex_);
2002   return debug_info_ != nullptr;
2003 }
2004 
2005 DebugInfo* NativeModule::GetDebugInfo() {
2006   base::MutexGuard guard(&allocation_mutex_);
2007   if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
2008   return debug_info_.get();
2009 }
2010 
2011 void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
2012                                        size_t committed_size) {
2013   base::MutexGuard lock(&native_modules_mutex_);
2014   for (auto& code_space : owned_code_space) {
2015     DCHECK(code_space.IsReserved());
2016     TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
2017                code_space.address(), code_space.end(), code_space.size());
2018 
2019 #if defined(V8_OS_WIN64)
2020     if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
2021       win64_unwindinfo::UnregisterNonABICompliantCodeRange(
2022           reinterpret_cast<void*>(code_space.address()));
2023     }
2024 #endif  // V8_OS_WIN64
2025 
2026     lookup_map_.erase(code_space.address());
2027     BackingStore::ReleaseReservation(code_space.size());
2028     code_space.Free();
2029     DCHECK(!code_space.IsReserved());
2030   }
2031 
2032   DCHECK(IsAligned(committed_size, CommitPageSize()));
2033   // TODO(v8:8462): Remove this once perf supports remapping.
2034   if (!FLAG_perf_prof) {
2035     size_t old_committed =
2036         total_committed_code_space_.fetch_sub(committed_size);
2037     DCHECK_LE(committed_size, old_committed);
2038     USE(old_committed);
2039   }
2040 }
2041 
2042 NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
2043   base::MutexGuard lock(&native_modules_mutex_);
2044   if (lookup_map_.empty()) return nullptr;
2045 
2046   auto iter = lookup_map_.upper_bound(pc);
2047   if (iter == lookup_map_.begin()) return nullptr;
2048   --iter;
2049   Address region_start = iter->first;
2050   Address region_end = iter->second.first;
2051   NativeModule* candidate = iter->second.second;
2052 
2053   DCHECK_NOT_NULL(candidate);
2054   return region_start <= pc && pc < region_end ? candidate : nullptr;
2055 }
2056 
2057 WasmCode* WasmCodeManager::LookupCode(Address pc) const {
2058   NativeModule* candidate = LookupNativeModule(pc);
2059   return candidate ? candidate->Lookup(pc) : nullptr;
2060 }
2061 
2062 // TODO(v8:7424): Code protection scopes are not yet supported with shared code
2063 // enabled and need to be revisited.
2064 NativeModuleModificationScope::NativeModuleModificationScope(
2065     NativeModule* native_module)
2066     : native_module_(native_module) {
2067   if (FLAG_wasm_write_protect_code_memory && native_module_ &&
2068       (native_module_->modification_scope_depth_++) == 0) {
2069     bool success = native_module_->SetExecutable(false);
2070     CHECK(success);
2071   }
2072 }
2073 
2074 NativeModuleModificationScope::~NativeModuleModificationScope() {
2075   if (FLAG_wasm_write_protect_code_memory && native_module_ &&
2076       (native_module_->modification_scope_depth_--) == 1) {
2077     bool success = native_module_->SetExecutable(true);
2078     CHECK(success);
2079   }
2080 }
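// Usage sketch (hedged): with --wasm-write-protect-code-memory enabled, the
// scope flips the module's code pages between writable and executable, and
// nests via {modification_scope_depth_}:
//
//   {
//     NativeModuleModificationScope scope(native_module);  // code now writable
//     // ... copy or patch machine code ...
//   }  // outermost scope destroyed: code is executable (non-writable) again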
2081 
2082 namespace {
2083 thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
2084 }  // namespace
2085 
2086 WasmCodeRefScope::WasmCodeRefScope()
2087     : previous_scope_(current_code_refs_scope) {
2088   current_code_refs_scope = this;
2089 }
2090 
2091 WasmCodeRefScope::~WasmCodeRefScope() {
2092   DCHECK_EQ(this, current_code_refs_scope);
2093   current_code_refs_scope = previous_scope_;
2094   std::vector<WasmCode*> code_ptrs;
2095   code_ptrs.reserve(code_ptrs_.size());
2096   code_ptrs.assign(code_ptrs_.begin(), code_ptrs_.end());
2097   WasmCode::DecrementRefCount(VectorOf(code_ptrs));
2098 }
2099 
2100 // static
2101 void WasmCodeRefScope::AddRef(WasmCode* code) {
2102   DCHECK_NOT_NULL(code);
2103   WasmCodeRefScope* current_scope = current_code_refs_scope;
2104   DCHECK_NOT_NULL(current_scope);
2105   auto entry = current_scope->code_ptrs_.insert(code);
2106   // If we added a new entry, increment the ref counter.
2107   if (entry.second) code->IncRef();
2108 }
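// Usage sketch (hedged), matching the pattern used throughout this file:
//
//   {
//     WasmCodeRefScope code_refs;
//     WasmCode* code = native_module->Lookup(pc);  // Lookup() calls AddRef()
//     // ... use {code}; its ref count is dropped when {code_refs} dies.
//   }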
2109 
2110 const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
2111 #define RUNTIME_STUB_NAME(Name) #Name,
2112 #define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
2113   constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
2114       RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
2115 #undef RUNTIME_STUB_NAME
2116 #undef RUNTIME_STUB_NAME_TRAP
2117   STATIC_ASSERT(arraysize(runtime_stub_names) ==
2118                 WasmCode::kRuntimeStubCount + 1);
2119 
2120   DCHECK_GT(arraysize(runtime_stub_names), stub_id);
2121   return runtime_stub_names[stub_id];
2122 }
2123 
2124 }  // namespace wasm
2125 }  // namespace internal
2126 }  // namespace v8
2127 #undef TRACE_HEAP
2128