1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/wasm/wasm-code-manager.h"
6
7 #include <algorithm>
8 #include <iomanip>
9 #include <numeric>
10
11 #include "src/base/atomicops.h"
12 #include "src/base/build_config.h"
13 #include "src/base/iterator.h"
14 #include "src/base/macros.h"
15 #include "src/base/platform/platform.h"
16 #include "src/base/small-vector.h"
17 #include "src/base/vector.h"
18 #include "src/codegen/assembler-inl.h"
19 #include "src/codegen/macro-assembler-inl.h"
20 #include "src/codegen/macro-assembler.h"
21 #include "src/common/globals.h"
22 #include "src/diagnostics/disassembler.h"
23 #include "src/logging/counters.h"
24 #include "src/logging/log.h"
25 #include "src/objects/objects-inl.h"
26 #include "src/snapshot/embedded/embedded-data-inl.h"
27 #include "src/utils/ostreams.h"
28 #include "src/wasm/code-space-access.h"
29 #include "src/wasm/compilation-environment.h"
30 #include "src/wasm/function-compiler.h"
31 #include "src/wasm/jump-table-assembler.h"
32 #include "src/wasm/memory-protection-key.h"
33 #include "src/wasm/module-compiler.h"
34 #include "src/wasm/wasm-debug.h"
35 #include "src/wasm/wasm-engine.h"
36 #include "src/wasm/wasm-import-wrapper-cache.h"
37 #include "src/wasm/wasm-module-sourcemap.h"
38 #include "src/wasm/wasm-module.h"
39 #include "src/wasm/wasm-objects-inl.h"
40 #include "src/wasm/wasm-objects.h"
41
42 #if defined(V8_OS_WIN64)
43 #include "src/base/platform/wrappers.h"
44 #include "src/diagnostics/unwinding-info-win64.h"
45 #endif // V8_OS_WIN64
46
47 #define TRACE_HEAP(...) \
48 do { \
49 if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
50 } while (false)
51
52 namespace v8 {
53 namespace internal {
54 namespace wasm {
55
56 using trap_handler::ProtectedInstructionData;
57
58 base::AddressRegion DisjointAllocationPool::Merge(
59 base::AddressRegion new_region) {
60 // Find the possible insertion position by identifying the first region whose
61 // start address is not less than that of {new_region}. Since there cannot be
62 // any overlap between regions, this also means that the start of {above} is
63 // greater than or equal to the *end* of {new_region}.
64 auto above = regions_.lower_bound(new_region);
65 DCHECK(above == regions_.end() || above->begin() >= new_region.end());
66
67 // Check whether to merge with {above}.
68 if (above != regions_.end() && new_region.end() == above->begin()) {
69 base::AddressRegion merged_region{new_region.begin(),
70 new_region.size() + above->size()};
71 DCHECK_EQ(merged_region.end(), above->end());
72 // Check whether to also merge with the region below.
73 if (above != regions_.begin()) {
74 auto below = above;
75 --below;
76 if (below->end() == new_region.begin()) {
77 merged_region = {below->begin(), below->size() + merged_region.size()};
78 regions_.erase(below);
79 }
80 }
81 auto insert_pos = regions_.erase(above);
82 regions_.insert(insert_pos, merged_region);
83 return merged_region;
84 }
85
86 // No element below, and not adjacent to {above}: insert and done.
87 if (above == regions_.begin()) {
88 regions_.insert(above, new_region);
89 return new_region;
90 }
91
92 auto below = above;
93 --below;
94 // Consistency check:
95 DCHECK(above == regions_.end() || below->end() < above->begin());
96
97 // Adjacent to {below}: merge and done.
98 if (below->end() == new_region.begin()) {
99 base::AddressRegion merged_region{below->begin(),
100 below->size() + new_region.size()};
101 DCHECK_EQ(merged_region.end(), new_region.end());
102 regions_.erase(below);
103 regions_.insert(above, merged_region);
104 return merged_region;
105 }
106
107 // Not adjacent to any existing region: insert between {below} and {above}.
108 DCHECK_LT(below->end(), new_region.begin());
109 regions_.insert(above, new_region);
110 return new_region;
111 }
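// A usage sketch for {Merge} above (illustrative only, addresses are made up),
// assuming an initially empty pool:
//
//   DisjointAllocationPool pool;
//   pool.Merge({0x1000, 0x1000});  // pool now holds [0x1000, 0x2000)
//   pool.Merge({0x3000, 0x1000});  // pool: [0x1000, 0x2000), [0x3000, 0x4000)
//   base::AddressRegion r = pool.Merge({0x2000, 0x1000});
//   // {r} is [0x1000, 0x4000): the new region is adjacent to both existing
//   // regions, so all three are coalesced into one.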
112
113 base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
114 return AllocateInRegion(size,
115 {kNullAddress, std::numeric_limits<size_t>::max()});
116 }
117
118 base::AddressRegion DisjointAllocationPool::AllocateInRegion(
119 size_t size, base::AddressRegion region) {
120 // Get an iterator to the first contained region whose start address is not
121 // smaller than the start address of {region}. Start the search from the
122 // region one before that (the last one whose start address is smaller).
123 auto it = regions_.lower_bound(region);
124 if (it != regions_.begin()) --it;
125
126 for (auto end = regions_.end(); it != end; ++it) {
127 base::AddressRegion overlap = it->GetOverlap(region);
128 if (size > overlap.size()) continue;
129 base::AddressRegion ret{overlap.begin(), size};
130 base::AddressRegion old = *it;
131 auto insert_pos = regions_.erase(it);
132 if (size == old.size()) {
133 // We use the full region --> nothing to add back.
134 } else if (ret.begin() == old.begin()) {
135 // We return a region at the start --> shrink old region from front.
136 regions_.insert(insert_pos, {old.begin() + size, old.size() - size});
137 } else if (ret.end() == old.end()) {
138 // We return a region at the end --> shrink remaining region.
139 regions_.insert(insert_pos, {old.begin(), old.size() - size});
140 } else {
141 // We return something in the middle --> split the remaining region
142 // (insert the region with smaller address first).
143 regions_.insert(insert_pos, {old.begin(), ret.begin() - old.begin()});
144 regions_.insert(insert_pos, {ret.end(), old.end() - ret.end()});
145 }
146 return ret;
147 }
148 return {};
149 }
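// A sketch of the allocation paths above (illustrative, made-up addresses):
// allocating from the front of a free region shrinks it, while allocating
// from the middle splits it in two.
//
//   DisjointAllocationPool pool;
//   pool.Merge({0x1000, 0x3000});            // free: [0x1000, 0x4000)
//   base::AddressRegion a = pool.Allocate(0x800);
//   // {a} is [0x1000, 0x1800); free: [0x1800, 0x4000)
//   base::AddressRegion b = pool.AllocateInRegion(0x800, {0x3000, 0x1000});
//   // {b} is [0x3000, 0x3800); free: [0x1800, 0x3000), [0x3800, 0x4000)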
150
151 Address WasmCode::constant_pool() const {
152 if (FLAG_enable_embedded_constant_pool) {
153 if (constant_pool_offset_ < code_comments_offset_) {
154 return instruction_start() + constant_pool_offset_;
155 }
156 }
157 return kNullAddress;
158 }
159
160 Address WasmCode::handler_table() const {
161 return instruction_start() + handler_table_offset_;
162 }
163
164 int WasmCode::handler_table_size() const {
165 DCHECK_GE(constant_pool_offset_, handler_table_offset_);
166 return static_cast<int>(constant_pool_offset_ - handler_table_offset_);
167 }
168
169 Address WasmCode::code_comments() const {
170 return instruction_start() + code_comments_offset_;
171 }
172
173 int WasmCode::code_comments_size() const {
174 DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
175 return static_cast<int>(unpadded_binary_size_ - code_comments_offset_);
176 }
177
178 std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
179 std::initializer_list<base::Vector<const byte>> vectors) {
180 size_t total_size = 0;
181 for (auto& vec : vectors) total_size += vec.size();
182 // Use default-initialization (== no initialization).
183 std::unique_ptr<byte[]> result{new byte[total_size]};
184 byte* ptr = result.get();
185 for (auto& vec : vectors) {
186 if (vec.empty()) continue; // Avoid nullptr in {memcpy}.
187 memcpy(ptr, vec.begin(), vec.size());
188 ptr += vec.size();
189 }
190 return result;
191 }
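// Usage sketch for {ConcatenateBytes} above (illustrative only; shown as it
// would be called from within {WasmCode}, values are made up):
//
//   byte a[] = {1, 2, 3};
//   byte b[] = {4, 5};
//   std::unique_ptr<const byte[]> joined =
//       ConcatenateBytes({base::ArrayVector(a), base::ArrayVector(b)});
//   // {joined} owns the five bytes {1, 2, 3, 4, 5}.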
192
193 void WasmCode::RegisterTrapHandlerData() {
194 DCHECK(!has_trap_handler_index());
195 if (kind() != WasmCode::kWasmFunction) return;
196 if (protected_instructions_size_ == 0) return;
197
198 Address base = instruction_start();
199
200 size_t size = instructions().size();
201 auto protected_instruction_data = this->protected_instructions();
202 const int index =
203 RegisterHandlerData(base, size, protected_instruction_data.size(),
204 protected_instruction_data.begin());
205
206 // TODO(eholk): if index is negative, fail.
207 CHECK_LE(0, index);
208 set_trap_handler_index(index);
209 DCHECK(has_trap_handler_index());
210 }
211
212 bool WasmCode::ShouldBeLogged(Isolate* isolate) {
213 // The return value is cached in {WasmEngine::IsolateData::log_codes}. Make
214 // sure to call {WasmEngine::EnableCodeLogging} if this return value would
215 // change for any isolate. Otherwise we might lose code events.
216 return isolate->logger()->is_listening_to_code_events() ||
217 isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
218 isolate->is_profiling();
219 }
220
221 std::string WasmCode::DebugName() const {
222 if (IsAnonymous()) {
223 return "anonymous function";
224 }
225
226 ModuleWireBytes wire_bytes(native_module()->wire_bytes());
227 const WasmModule* module = native_module()->module();
228 WireBytesRef name_ref =
229 module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
230 WasmName name = wire_bytes.GetNameOrNull(name_ref);
231 std::string name_buffer;
232 if (kind() == kWasmToJsWrapper) {
233 name_buffer = "wasm-to-js:";
234 size_t prefix_len = name_buffer.size();
235 constexpr size_t kMaxSigLength = 128;
236 name_buffer.resize(prefix_len + kMaxSigLength);
237 const FunctionSig* sig = module->functions[index()].sig;
238 size_t sig_length = PrintSignature(
239 base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
240 name_buffer.resize(prefix_len + sig_length);
241 // If the import has a name, also append that (separated by "-").
242 if (!name.empty()) {
243 name_buffer += '-';
244 name_buffer.append(name.begin(), name.size());
245 }
246 } else if (name.empty()) {
247 name_buffer.resize(32);
248 name_buffer.resize(
249 SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
250 "wasm-function[%d]", index()));
251 } else {
252 name_buffer.append(name.begin(), name.end());
253 }
254 return name_buffer;
255 }
256
257 void WasmCode::LogCode(Isolate* isolate, const char* source_url,
258 int script_id) const {
259 DCHECK(ShouldBeLogged(isolate));
260 if (IsAnonymous()) return;
261
262 ModuleWireBytes wire_bytes(native_module_->wire_bytes());
263 const WasmModule* module = native_module_->module();
264 std::string fn_name = DebugName();
265 WasmName name = base::VectorOf(fn_name);
266
267 const WasmDebugSymbols& debug_symbols = module->debug_symbols;
268 auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
269 auto source_map = native_module_->GetWasmSourceMap();
270 if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
271 !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
272 WasmName external_url =
273 wire_bytes.GetNameOrNull(debug_symbols.external_url);
274 std::string external_url_string(external_url.data(), external_url.size());
275 HandleScope scope(isolate);
276 v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
277 Local<v8::String> source_map_str =
278 load_wasm_source_map(v8_isolate, external_url_string.c_str());
279 native_module_->SetWasmSourceMap(
280 std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
281 }
282
283 // Record source positions before adding code, otherwise when code is added,
284 // there are no source positions to associate with the added code.
285 if (!source_positions().empty()) {
286 LOG_CODE_EVENT(isolate, WasmCodeLinePosInfoRecordEvent(instruction_start(),
287 source_positions()));
288 }
289
290 int code_offset = module->functions[index_].code.offset();
291 PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
292 source_url, code_offset, script_id));
293 }
294
295 void WasmCode::Validate() const {
296 // The packing strategy for {tagged_parameter_slots} only works if both the
297 // max number of parameters and their max combined stack slot usage fit into
298 // their respective half of the result value.
299 STATIC_ASSERT(wasm::kV8MaxWasmFunctionParams <
300 std::numeric_limits<uint16_t>::max());
301 static constexpr int kMaxSlotsPerParam = 4; // S128 on 32-bit platforms.
302 STATIC_ASSERT(wasm::kV8MaxWasmFunctionParams * kMaxSlotsPerParam <
303 std::numeric_limits<uint16_t>::max());
304
305 #ifdef DEBUG
306 // Scope for foreign WasmCode pointers.
307 WasmCodeRefScope code_ref_scope;
308 // We expect certain relocation info modes to never appear in {WasmCode}
309 // objects or to be restricted to a small set of valid values. Hence the
310 // iteration below does not use a mask, but visits all relocation data.
311 for (RelocIterator it(instructions(), reloc_info(), constant_pool());
312 !it.done(); it.next()) {
313 RelocInfo::Mode mode = it.rinfo()->rmode();
314 switch (mode) {
315 case RelocInfo::WASM_CALL: {
316 Address target = it.rinfo()->wasm_call_address();
317 WasmCode* code = native_module_->Lookup(target);
318 CHECK_NOT_NULL(code);
319 CHECK_EQ(WasmCode::kJumpTable, code->kind());
320 CHECK(code->contains(target));
321 break;
322 }
323 case RelocInfo::WASM_STUB_CALL: {
324 Address target = it.rinfo()->wasm_stub_call_address();
325 WasmCode* code = native_module_->Lookup(target);
326 CHECK_NOT_NULL(code);
327 CHECK_EQ(WasmCode::kJumpTable, code->kind());
328 CHECK(code->contains(target));
329 break;
330 }
331 case RelocInfo::INTERNAL_REFERENCE:
332 case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
333 Address target = it.rinfo()->target_internal_reference();
334 CHECK(contains(target));
335 break;
336 }
337 case RelocInfo::EXTERNAL_REFERENCE:
338 case RelocInfo::CONST_POOL:
339 case RelocInfo::VENEER_POOL:
340 // These are OK to appear.
341 break;
342 default:
343 FATAL("Unexpected mode: %d", mode);
344 }
345 }
346 #endif
347 }
348
349 void WasmCode::MaybePrint() const {
350 // Determines whether flags want this code to be printed.
351 bool function_index_matches =
352 (!IsAnonymous() &&
353 FLAG_print_wasm_code_function_index == static_cast<int>(index()));
354 if (FLAG_print_code || (kind() == kWasmFunction
355 ? (FLAG_print_wasm_code || function_index_matches)
356 : FLAG_print_wasm_stub_code)) {
357 std::string name = DebugName();
358 Print(name.c_str());
359 }
360 }
361
362 void WasmCode::Print(const char* name) const {
363 StdoutStream os;
364 os << "--- WebAssembly code ---\n";
365 Disassemble(name, os);
366 if (native_module_->HasDebugInfo()) {
367 if (auto* debug_side_table =
368 native_module_->GetDebugInfo()->GetDebugSideTableIfExists(this)) {
369 debug_side_table->Print(os);
370 }
371 }
372 os << "--- End code ---\n";
373 }
374
375 void WasmCode::Disassemble(const char* name, std::ostream& os,
376 Address current_pc) const {
377 if (name) os << "name: " << name << "\n";
378 if (!IsAnonymous()) os << "index: " << index() << "\n";
379 os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
380 if (kind() == kWasmFunction) {
381 DCHECK(is_liftoff() || tier() == ExecutionTier::kTurbofan);
382 const char* compiler =
383 is_liftoff() ? (for_debugging() ? "Liftoff (debug)" : "Liftoff")
384 : "TurboFan";
385 os << "compiler: " << compiler << "\n";
386 }
387 size_t padding = instructions().size() - unpadded_binary_size_;
388 os << "Body (size = " << instructions().size() << " = "
389 << unpadded_binary_size_ << " + " << padding << " padding)\n";
390
391 int instruction_size = unpadded_binary_size_;
392 if (constant_pool_offset_ < instruction_size) {
393 instruction_size = constant_pool_offset_;
394 }
395 if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
396 instruction_size = safepoint_table_offset_;
397 }
398 if (handler_table_offset_ < instruction_size) {
399 instruction_size = handler_table_offset_;
400 }
401 DCHECK_LT(0, instruction_size);
402
403 #ifdef ENABLE_DISASSEMBLER
404 os << "Instructions (size = " << instruction_size << ")\n";
405 Disassembler::Decode(nullptr, os, instructions().begin(),
406 instructions().begin() + instruction_size,
407 CodeReference(this), current_pc);
408 os << "\n";
409
410 if (handler_table_size() > 0) {
411 HandlerTable table(this);
412 os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
413 << "):\n";
414 table.HandlerTableReturnPrint(os);
415 os << "\n";
416 }
417
418 if (protected_instructions_size_ > 0) {
419 os << "Protected instructions:\n pc offset land pad\n";
420 for (auto& data : protected_instructions()) {
421 os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
422 << std::hex << data.landing_offset << "\n";
423 }
424 os << "\n";
425 }
426
427 if (!source_positions().empty()) {
428 os << "Source positions:\n pc offset position\n";
429 for (SourcePositionTableIterator it(source_positions()); !it.done();
430 it.Advance()) {
431 os << std::setw(10) << std::hex << it.code_offset() << std::dec
432 << std::setw(10) << it.source_position().ScriptOffset()
433 << (it.is_statement() ? " statement" : "") << "\n";
434 }
435 os << "\n";
436 }
437
438 if (safepoint_table_offset_ > 0) {
439 SafepointTable table(this);
440 table.Print(os);
441 os << "\n";
442 }
443
444 os << "RelocInfo (size = " << reloc_info().size() << ")\n";
445 for (RelocIterator it(instructions(), reloc_info(), constant_pool());
446 !it.done(); it.next()) {
447 it.rinfo()->Print(nullptr, os);
448 }
449 os << "\n";
450 #else // !ENABLE_DISASSEMBLER
451 os << "Instructions (size = " << instruction_size << ", "
452 << static_cast<void*>(instructions().begin()) << "-"
453 << static_cast<void*>(instructions().begin() + instruction_size) << ")\n";
454 #endif // !ENABLE_DISASSEMBLER
455 }
456
457 const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
458 switch (kind) {
459 case WasmCode::kWasmFunction:
460 return "wasm function";
461 case WasmCode::kWasmToCapiWrapper:
462 return "wasm-to-capi";
463 case WasmCode::kWasmToJsWrapper:
464 return "wasm-to-js";
465 case WasmCode::kJumpTable:
466 return "jump table";
467 }
468 return "unknown kind";
469 }
470
471 WasmCode::~WasmCode() {
472 if (has_trap_handler_index()) {
473 trap_handler::ReleaseHandlerData(trap_handler_index());
474 }
475 }
476
477 V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
478 if (GetWasmEngine()->AddPotentiallyDeadCode(this)) {
479 // The code just became potentially dead. The ref count we wanted to
480 // decrement is now transferred to the set of potentially dead code, and
481 // will be decremented when the next GC is run.
482 return false;
483 }
484 // If we reach here, the code was already potentially dead. Decrement the ref
485 // count, and return true if it drops to zero.
486 return DecRefOnDeadCode();
487 }
488
489 // static
490 void WasmCode::DecrementRefCount(base::Vector<WasmCode* const> code_vec) {
491 // Decrement the ref counter of all given code objects. Keep the ones whose
492 // ref count drops to zero.
493 WasmEngine::DeadCodeMap dead_code;
494 for (WasmCode* code : code_vec) {
495 if (!code->DecRef()) continue; // Remaining references.
496 dead_code[code->native_module()].push_back(code);
497 }
498
499 if (dead_code.empty()) return;
500
501 GetWasmEngine()->FreeDeadCode(dead_code);
502 }
503
504 int WasmCode::GetSourcePositionBefore(int offset) {
505 int position = kNoSourcePosition;
506 for (SourcePositionTableIterator iterator(source_positions());
507 !iterator.done() && iterator.code_offset() < offset;
508 iterator.Advance()) {
509 position = iterator.source_position().ScriptOffset();
510 }
511 return position;
512 }
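// Worked example for {GetSourcePositionBefore} above (hypothetical source
// position table): with entries {code offset 4 -> position 10, 12 -> 20,
// 30 -> 25}, a query for offset 16 returns 20, i.e. the source position of
// the last entry whose code offset lies before the given offset.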
513
514 // static
515 constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
516
517 WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
518 : protect_code_memory_(
519 !V8_HAS_PTHREAD_JIT_WRITE_PROTECT &&
520 FLAG_wasm_write_protect_code_memory &&
521 !GetWasmCodeManager()->MemoryProtectionKeysEnabled()),
522 async_counters_(std::move(async_counters)) {
523 owned_code_space_.reserve(4);
524 }
525
526 WasmCodeAllocator::~WasmCodeAllocator() {
527 GetWasmCodeManager()->FreeNativeModule(base::VectorOf(owned_code_space_),
528 committed_code_space());
529 }
530
531 void WasmCodeAllocator::Init(VirtualMemory code_space) {
532 DCHECK(owned_code_space_.empty());
533 DCHECK(free_code_space_.IsEmpty());
534 free_code_space_.Merge(code_space.region());
535 owned_code_space_.emplace_back(std::move(code_space));
536 async_counters_->wasm_module_num_code_spaces()->AddSample(1);
537 }
538
539 namespace {
540 // On Windows, we cannot commit a region that straddles different reservations
541 // of virtual memory. Because we bump-allocate, and because, if we need more
542 // memory, we append that memory at the end of the owned_code_space_ list, we
543 // traverse that list in reverse order to find the reservation(s) that guide how
544 // to chunk the region to commit.
545 #if V8_OS_WIN
546 constexpr bool kNeedsToSplitRangeByReservations = true;
547 #else
548 constexpr bool kNeedsToSplitRangeByReservations = false;
549 #endif
550
551 base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
552 base::AddressRegion range,
553 const std::vector<VirtualMemory>& owned_code_space) {
554 if (!kNeedsToSplitRangeByReservations) return {range};
555
556 base::SmallVector<base::AddressRegion, 1> split_ranges;
557 size_t missing_begin = range.begin();
558 size_t missing_end = range.end();
559 for (auto& vmem : base::Reversed(owned_code_space)) {
560 Address overlap_begin = std::max(missing_begin, vmem.address());
561 Address overlap_end = std::min(missing_end, vmem.end());
562 if (overlap_begin >= overlap_end) continue;
563 split_ranges.emplace_back(overlap_begin, overlap_end - overlap_begin);
564 // Opportunistically reduce the missing range. This might terminate the loop
565 // early.
566 if (missing_begin == overlap_begin) missing_begin = overlap_end;
567 if (missing_end == overlap_end) missing_end = overlap_begin;
568 if (missing_begin >= missing_end) break;
569 }
570 #ifdef ENABLE_SLOW_DCHECKS
571 // The returned vector should cover the full range.
572 size_t total_split_size = 0;
573 for (auto split : split_ranges) total_split_size += split.size();
574 DCHECK_EQ(range.size(), total_split_size);
575 #endif
576 return split_ranges;
577 }
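// Illustrative example (made-up addresses): on Windows a commit range that
// straddles two reservations is split at the reservation boundary.
//
//   owned_code_space: [0x10000, 0x20000), [0x20000, 0x30000)
//   SplitRangeByReservationsIfNeeded({0x1F000, 0x2000}, owned_code_space)
//       returns {[0x20000, 0x21000), [0x1F000, 0x20000)}
//   (the order reflects the reverse traversal of the reservation list).
//   On other platforms the input range is returned unchanged.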
578
579 int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
580 return NativeModule::kNeedsFarJumpsBetweenCodeSpaces
581 ? static_cast<int>(num_declared_functions)
582 : 0;
583 }
584
585 // Returns an overapproximation of the code size overhead per new code space
586 // created by the jump tables.
587 size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
588 // Overhead for the jump table.
589 size_t overhead = RoundUp<kCodeAlignment>(
590 JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));
591
592 #if defined(V8_OS_WIN64)
593 // On Win64, we need to reserve some pages at the beginning of an executable
594 // space. See {AddCodeSpace}.
595 overhead += Heap::GetCodeRangeReservedAreaSize();
596 #endif // V8_OS_WIN64
597
598 // Overhead for the far jump table.
599 overhead +=
600 RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
601 WasmCode::kRuntimeStubCount,
602 NumWasmFunctionsInFarJumpTable(num_declared_functions)));
603
604 return overhead;
605 }
606
607 // Returns an estimate of how much code space should be reserved.
608 size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
609 size_t total_reserved) {
610 size_t overhead = OverheadPerCodeSpace(num_declared_functions);
611
612 // Reserve the maximum of
613 // a) needed size + overhead (this is the minimum needed)
614 // b) 2 * overhead (to not waste too much space by overhead)
615 // c) 1/4 of current total reservation size (to grow exponentially)
616 size_t minimum_size = 2 * overhead;
617 size_t suggested_size =
618 std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
619 minimum_size),
620 total_reserved / 4);
621
622 if (V8_UNLIKELY(minimum_size > WasmCodeAllocator::kMaxCodeSpaceSize)) {
623 constexpr auto format = base::StaticCharVector(
624 "wasm code reservation: required minimum (%zu) is bigger than "
625 "supported maximum (%zu)");
626 constexpr int kMaxMessageLength =
627 format.size() - 6 + 2 * std::numeric_limits<size_t>::digits10;
628 base::EmbeddedVector<char, kMaxMessageLength + 1> message;
629 SNPrintF(message, format.begin(), minimum_size,
630 WasmCodeAllocator::kMaxCodeSpaceSize);
631 V8::FatalProcessOutOfMemory(nullptr, message.begin());
632 UNREACHABLE();
633 }
634
635 // Limit by the maximum supported code space size.
636 size_t reserve_size =
637 std::min(WasmCodeAllocator::kMaxCodeSpaceSize, suggested_size);
638
639 return reserve_size;
640 }
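// Worked example (hypothetical numbers; the real overhead depends on the
// platform's jump table sizes): with overhead = 64 KB, code_size_estimate =
// 256 KB and total_reserved = 2 MB, the three candidates are
//   a) 256 KB + 64 KB = 320 KB
//   b)  2 * 64 KB     = 128 KB
//   c)  2 MB / 4      = 512 KB
// so 512 KB gets reserved (subject to the {kMaxCodeSpaceSize} cap).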
641
642 #ifdef DEBUG
643 // Check postconditions when returning from this method:
644 // 1) {region} must be fully contained in {writable_memory_};
645 // 2) {writable_memory_} must be a maximally merged ordered set of disjoint
646 // non-empty regions.
647 class CheckWritableMemoryRegions {
648 public:
649 CheckWritableMemoryRegions(
650 std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
651 writable_memory,
652 base::AddressRegion new_region, size_t& new_writable_memory)
653 : writable_memory_(writable_memory),
654 new_region_(new_region),
655 new_writable_memory_(new_writable_memory),
656 old_writable_size_(std::accumulate(
657 writable_memory_.begin(), writable_memory_.end(), size_t{0},
658 [](size_t old, base::AddressRegion region) {
659 return old + region.size();
660 })) {}
661
662 ~CheckWritableMemoryRegions() {
663 // {new_region} must be contained in {writable_memory_}.
664 DCHECK(std::any_of(
665 writable_memory_.begin(), writable_memory_.end(),
666 [this](auto region) { return region.contains(new_region_); }));
667
668 // The new total size of writable memory must have increased by
669 // {new_writable_memory}.
670 size_t total_writable_size = std::accumulate(
671 writable_memory_.begin(), writable_memory_.end(), size_t{0},
672 [](size_t old, auto region) { return old + region.size(); });
673 DCHECK_EQ(old_writable_size_ + new_writable_memory_, total_writable_size);
674
675 // There are no empty regions.
676 DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
677 [](auto region) { return region.is_empty(); }));
678
679 // Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc
680 // so USE is required to prevent build failures in debug builds).
681 USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
682 Address{0}, [](Address previous_end, auto region) {
683 DCHECK_LT(previous_end, region.begin());
684 return region.end();
685 }));
686 }
687
688 private:
689 const std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
690 writable_memory_;
691 const base::AddressRegion new_region_;
692 const size_t& new_writable_memory_;
693 const size_t old_writable_size_;
694 };
695 #else // !DEBUG
696 class CheckWritableMemoryRegions {
697 public:
698 template <typename... Args>
699 explicit CheckWritableMemoryRegions(Args...) {}
700 };
701 #endif // !DEBUG
702
703 } // namespace
704
705 base::Vector<byte> WasmCodeAllocator::AllocateForCode(
706 NativeModule* native_module, size_t size) {
707 return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion);
708 }
709
710 base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
711 NativeModule* native_module, size_t size, base::AddressRegion region) {
712 DCHECK_LT(0, size);
713 auto* code_manager = GetWasmCodeManager();
714 size = RoundUp<kCodeAlignment>(size);
715 base::AddressRegion code_space =
716 free_code_space_.AllocateInRegion(size, region);
717 if (V8_UNLIKELY(code_space.is_empty())) {
718 // Only allocations without a specific region are allowed to fail. Otherwise
719 // the region must have been allocated big enough to hold all initial
720 // allocations (jump tables etc).
721 CHECK_EQ(kUnrestrictedRegion, region);
722
723 Address hint = owned_code_space_.empty() ? kNullAddress
724 : owned_code_space_.back().end();
725
726 size_t total_reserved = 0;
727 for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
728 size_t reserve_size = ReservationSize(
729 size, native_module->module()->num_declared_functions, total_reserved);
730 VirtualMemory new_mem =
731 code_manager->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
732 if (!new_mem.IsReserved()) {
733 constexpr auto format = base::StaticCharVector(
734 "Cannot allocate more code space (%zu bytes, currently %zu)");
735 constexpr int kMaxMessageLength =
736 format.size() - 6 + 2 * std::numeric_limits<size_t>::digits10;
737 base::EmbeddedVector<char, kMaxMessageLength + 1> message;
738 SNPrintF(message, format.begin(), total_reserved, reserve_size);
739 V8::FatalProcessOutOfMemory(nullptr, message.begin());
740 UNREACHABLE();
741 }
742
743 base::AddressRegion new_region = new_mem.region();
744 code_manager->AssignRange(new_region, native_module);
745 free_code_space_.Merge(new_region);
746 owned_code_space_.emplace_back(std::move(new_mem));
747 native_module->AddCodeSpaceLocked(new_region);
748
749 code_space = free_code_space_.Allocate(size);
750 DCHECK(!code_space.is_empty());
751 async_counters_->wasm_module_num_code_spaces()->AddSample(
752 static_cast<int>(owned_code_space_.size()));
753 }
754 const Address commit_page_size = CommitPageSize();
755 Address commit_start = RoundUp(code_space.begin(), commit_page_size);
756 if (commit_start != code_space.begin()) {
757 MakeWritable({commit_start - commit_page_size, commit_page_size});
758 }
759
760 Address commit_end = RoundUp(code_space.end(), commit_page_size);
761 // {commit_start} will be either code_space.start or the start of the next
762 // page. {commit_end} will be the start of the page after the one in which
763 // the allocation ends.
764 // We start from an aligned start, and we know we allocated vmem in
765 // page multiples.
766 // We just need to commit what's not committed. The page in which we
767 // start is already committed (or we start at the beginning of a page).
768 // The end needs to be committed all through the end of the page.
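// Illustrative example (made-up numbers, 4 KB commit pages): for an
// allocation at [0x5F00, 0x6300), {commit_start} is 0x6000 and {commit_end}
// is 0x7000, so only the page [0x6000, 0x7000) is committed here; the page
// holding 0x5F00 was already committed by the allocation that ended there.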
769 if (commit_start < commit_end) {
770 for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
771 {commit_start, commit_end - commit_start}, owned_code_space_)) {
772 code_manager->Commit(split_range);
773 }
774 committed_code_space_.fetch_add(commit_end - commit_start);
775 // Committed code cannot grow bigger than maximum code space size.
776 DCHECK_LE(committed_code_space_.load(), FLAG_wasm_max_code_space * MB);
777 if (protect_code_memory_) {
778 DCHECK_LT(0, writers_count_);
779 InsertIntoWritableRegions({commit_start, commit_end - commit_start},
780 false);
781 }
782 }
783 DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
784 allocated_code_space_.Merge(code_space);
785 generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
786
787 TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
788 code_space.begin(), size);
789 return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
790 }
791
792 // TODO(dlehmann): Ensure that {AddWriter()} is always paired up with a
793 // {RemoveWriter}, such that eventually the code space is write protected.
794 // One solution is to make the API foolproof by hiding {SetWritable()} and
795 // allowing change of permissions only through {CodeSpaceWriteScope}.
796 // TODO(dlehmann): Add tests that ensure the code space is eventually write-
797 // protected.
798 void WasmCodeAllocator::AddWriter() {
799 DCHECK(protect_code_memory_);
800 ++writers_count_;
801 }
802
803 void WasmCodeAllocator::RemoveWriter() {
804 DCHECK(protect_code_memory_);
805 DCHECK_GT(writers_count_, 0);
806 if (--writers_count_ > 0) return;
807
808 // Switch all memory to non-writable.
809 v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
810 for (base::AddressRegion writable : writable_memory_) {
811 for (base::AddressRegion split_range :
812 SplitRangeByReservationsIfNeeded(writable, owned_code_space_)) {
813 TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RX\n",
814 split_range.begin(), split_range.end());
815 CHECK(SetPermissions(page_allocator, split_range.begin(),
816 split_range.size(), PageAllocator::kReadExecute));
817 }
818 }
819 writable_memory_.clear();
820 }
821
822 void WasmCodeAllocator::MakeWritable(base::AddressRegion region) {
823 if (!protect_code_memory_) return;
824 DCHECK_LT(0, writers_count_);
825 DCHECK(!region.is_empty());
826 v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
827
828 // Align to commit page size.
829 size_t commit_page_size = page_allocator->CommitPageSize();
830 DCHECK(base::bits::IsPowerOfTwo(commit_page_size));
831 Address begin = RoundDown(region.begin(), commit_page_size);
832 Address end = RoundUp(region.end(), commit_page_size);
833 region = base::AddressRegion(begin, end - begin);
834
835 InsertIntoWritableRegions(region, true);
836 }
837
838 void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {
839 // Zap code area and collect freed code regions.
840 DisjointAllocationPool freed_regions;
841 size_t code_size = 0;
842 for (WasmCode* code : codes) {
843 code_size += code->instructions().size();
844 freed_regions.Merge(base::AddressRegion{code->instruction_start(),
845 code->instructions().size()});
846 }
847 freed_code_size_.fetch_add(code_size);
848
849 // Merge {freed_regions} into {freed_code_space_} and put all ranges of full
850 // pages to decommit into {regions_to_decommit} (decommitting is expensive,
851 // so try to merge regions before decommitting).
852 DisjointAllocationPool regions_to_decommit;
853 size_t commit_page_size = CommitPageSize();
854 for (auto region : freed_regions.regions()) {
855 auto merged_region = freed_code_space_.Merge(region);
856 Address discard_start =
857 std::max(RoundUp(merged_region.begin(), commit_page_size),
858 RoundDown(region.begin(), commit_page_size));
859 Address discard_end =
860 std::min(RoundDown(merged_region.end(), commit_page_size),
861 RoundUp(region.end(), commit_page_size));
862 if (discard_start >= discard_end) continue;
863 regions_to_decommit.Merge({discard_start, discard_end - discard_start});
864 }
865
866 auto* code_manager = GetWasmCodeManager();
867 for (auto region : regions_to_decommit.regions()) {
868 size_t old_committed = committed_code_space_.fetch_sub(region.size());
869 DCHECK_GE(old_committed, region.size());
870 USE(old_committed);
871 for (base::AddressRegion split_range :
872 SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
873 code_manager->Decommit(split_range);
874 }
875 }
876 }
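// Illustrative example for the decommit logic above (made-up addresses, 4 KB
// pages): freeing code at [0x6100, 0x6F00) while [0x6000, 0x6100) and
// [0x6F00, 0x7000) are still live merges the range into {freed_code_space_}
// but decommits nothing, since no complete commit page is covered yet. Once
// the neighbors are freed too, the merged region spans [0x6000, 0x7000) and
// that whole page is decommitted in a single call.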
877
878 size_t WasmCodeAllocator::GetNumCodeSpaces() const {
879 return owned_code_space_.size();
880 }
881
882 void WasmCodeAllocator::InsertIntoWritableRegions(base::AddressRegion region,
883 bool switch_to_writable) {
884 size_t new_writable_memory = 0;
885
886 CheckWritableMemoryRegions check_on_return{writable_memory_, region,
887 new_writable_memory};
888
889 v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
890 // Subroutine to make a non-writable region writable (if {switch_to_writable}
891 // is {true}) and insert it into {writable_memory_}.
892 auto make_writable = [&](decltype(writable_memory_)::iterator insert_pos,
893 base::AddressRegion region) {
894 new_writable_memory += region.size();
895 if (switch_to_writable) {
896 for (base::AddressRegion split_range :
897 SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
898 TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RWX\n",
899 split_range.begin(), split_range.end());
900 CHECK(SetPermissions(page_allocator, split_range.begin(),
901 split_range.size(),
902 PageAllocator::kReadWriteExecute));
903 }
904 }
905
906 // Insert {region} into {writable_memory_} before {insert_pos}, potentially
907 // merging it with the surrounding regions.
908 if (insert_pos != writable_memory_.begin()) {
909 auto previous = insert_pos;
910 --previous;
911 if (previous->end() == region.begin()) {
912 region = {previous->begin(), previous->size() + region.size()};
913 writable_memory_.erase(previous);
914 }
915 }
916 if (insert_pos != writable_memory_.end() &&
917 region.end() == insert_pos->begin()) {
918 region = {region.begin(), insert_pos->size() + region.size()};
919 insert_pos = writable_memory_.erase(insert_pos);
920 }
921 writable_memory_.insert(insert_pos, region);
922 };
923
924 DCHECK(!region.is_empty());
925 // Find a possible insertion position by identifying the first region whose
926 // start address is not less than that of {region}, and then start the merge
927 // from the existing region before that.
928 auto it = writable_memory_.lower_bound(region);
929 if (it != writable_memory_.begin()) --it;
930 for (;; ++it) {
931 if (it == writable_memory_.end() || it->begin() >= region.end()) {
932 // No overlap; add before {it}.
933 make_writable(it, region);
934 return;
935 }
936 if (it->end() <= region.begin()) continue; // Continue after {it}.
937 base::AddressRegion overlap = it->GetOverlap(region);
938 DCHECK(!overlap.is_empty());
939 if (overlap.begin() == region.begin()) {
940 if (overlap.end() == region.end()) return; // Fully contained already.
941 // Remove overlap (which is already writable) and continue.
942 region = {overlap.end(), region.end() - overlap.end()};
943 continue;
944 }
945 if (overlap.end() == region.end()) {
946 // Remove overlap (which is already writable), then make the remaining
947 // region writable.
948 region = {region.begin(), overlap.begin() - region.begin()};
949 make_writable(it, region);
950 return;
951 }
952 // Split {region}, make the split writable, and continue with the rest.
953 base::AddressRegion split = {region.begin(),
954 overlap.begin() - region.begin()};
955 make_writable(it, split);
956 region = {overlap.end(), region.end() - overlap.end()};
957 }
958 }
959
960 // static
961 constexpr base::AddressRegion WasmCodeAllocator::kUnrestrictedRegion;
962
963 namespace {
964 BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
965 if (!FLAG_wasm_bounds_checks) return kNoBoundsChecks;
966 if (FLAG_wasm_enforce_bounds_checks) return kExplicitBoundsChecks;
967 // We do not have trap handler support for memory64 yet.
968 if (module->is_memory64) return kExplicitBoundsChecks;
969 if (trap_handler::IsTrapHandlerEnabled()) return kTrapHandler;
970 return kExplicitBoundsChecks;
971 }
972 } // namespace
973
974 NativeModule::NativeModule(const WasmFeatures& enabled,
975 DynamicTiering dynamic_tiering,
976 VirtualMemory code_space,
977 std::shared_ptr<const WasmModule> module,
978 std::shared_ptr<Counters> async_counters,
979 std::shared_ptr<NativeModule>* shared_this)
980 : engine_scope_(
981 GetWasmEngine()->GetBarrierForBackgroundCompile()->TryLock()),
982 code_allocator_(async_counters),
983 enabled_features_(enabled),
984 module_(std::move(module)),
985 import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
986 new WasmImportWrapperCache())),
987 bounds_checks_(GetBoundsChecks(module_.get())) {
988 DCHECK(engine_scope_);
989 // We receive a pointer to an empty {std::shared_ptr}, and install ourselves
990 // there.
991 DCHECK_NOT_NULL(shared_this);
992 DCHECK_NULL(*shared_this);
993 shared_this->reset(this);
994 compilation_state_ = CompilationState::New(
995 *shared_this, std::move(async_counters), dynamic_tiering);
996 compilation_state_->InitCompileJob();
997 DCHECK_NOT_NULL(module_);
998 if (module_->num_declared_functions > 0) {
999 code_table_ =
1000 std::make_unique<WasmCode*[]>(module_->num_declared_functions);
1001 tiering_budgets_ =
1002 std::make_unique<uint32_t[]>(module_->num_declared_functions);
1003
1004 std::fill_n(tiering_budgets_.get(), module_->num_declared_functions,
1005 FLAG_wasm_tiering_budget);
1006 }
1007 // Even though there cannot be another thread using this object (since we are
1008 // just constructing it), we need to hold the mutex to fulfill the
1009 // precondition of {WasmCodeAllocator::Init}, which calls
1010 // {NativeModule::AddCodeSpaceLocked}.
1011 base::RecursiveMutexGuard guard{&allocation_mutex_};
1012 auto initial_region = code_space.region();
1013 code_allocator_.Init(std::move(code_space));
1014 AddCodeSpaceLocked(initial_region);
1015 }
1016
1017 void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
1018 WasmCodeRefScope code_ref_scope;
1019 DCHECK_LE(module_->num_declared_functions, max_functions);
1020 auto new_table = std::make_unique<WasmCode*[]>(max_functions);
1021 if (module_->num_declared_functions > 0) {
1022 memcpy(new_table.get(), code_table_.get(),
1023 module_->num_declared_functions * sizeof(WasmCode*));
1024 }
1025 code_table_ = std::move(new_table);
1026
1027 base::AddressRegion single_code_space_region;
1028 base::RecursiveMutexGuard guard(&allocation_mutex_);
1029 CHECK_EQ(1, code_space_data_.size());
1030 single_code_space_region = code_space_data_[0].region;
1031 // Re-allocate jump table.
1032 main_jump_table_ = CreateEmptyJumpTableInRegionLocked(
1033 JumpTableAssembler::SizeForNumberOfSlots(max_functions),
1034 single_code_space_region);
1035 code_space_data_[0].jump_table = main_jump_table_;
1036 }
1037
1038 void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
1039 DisallowGarbageCollection no_gc;
1040 if (!WasmCode::ShouldBeLogged(isolate)) return;
1041
1042 TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
1043 module_->num_declared_functions);
1044
1045 Object url_obj = script.name();
1046 DCHECK(url_obj.IsString() || url_obj.IsUndefined());
1047 std::unique_ptr<char[]> source_url =
1048 url_obj.IsString() ? String::cast(url_obj).ToCString() : nullptr;
1049
1050 // Log all owned code, not just the current entries in the code table. This
1051 // will also include import wrappers.
1052 WasmCodeRefScope code_ref_scope;
1053 for (auto& code : SnapshotAllOwnedCode()) {
1054 code->LogCode(isolate, source_url.get(), script.id());
1055 }
1056 }
1057
1058 CompilationEnv NativeModule::CreateCompilationEnv() const {
1059 return {module(), bounds_checks_, kRuntimeExceptionSupport, enabled_features_,
1060 compilation_state()->dynamic_tiering()};
1061 }
1062
1063 WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
1064 CodeSpaceWriteScope code_space_write_scope(this);
1065 const size_t relocation_size = code->relocation_size();
1066 base::OwnedVector<byte> reloc_info;
1067 if (relocation_size > 0) {
1068 reloc_info = base::OwnedVector<byte>::Of(
1069 base::Vector<byte>{code->relocation_start(), relocation_size});
1070 }
1071 Handle<ByteArray> source_pos_table(code->source_position_table(),
1072 code->GetIsolate());
1073 base::OwnedVector<byte> source_pos =
1074 base::OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
1075 if (source_pos_table->length() > 0) {
1076 source_pos_table->copy_out(0, source_pos.start(),
1077 source_pos_table->length());
1078 }
1079 CHECK(!code->is_off_heap_trampoline());
1080 STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
1081 base::Vector<const byte> instructions(
1082 reinterpret_cast<byte*>(code->raw_body_start()),
1083 static_cast<size_t>(code->raw_body_size()));
1084 const int stack_slots = code->stack_slots();
1085
1086 // Metadata offsets in Code objects are relative to the start of the metadata
1087 // section, whereas WasmCode expects offsets relative to InstructionStart.
1088 const int base_offset = code->raw_instruction_size();
1089 // TODO(jgruber,v8:8758): Remove this translation. It exists only because
1090 // Code objects contain real offsets but WasmCode expects an offset of 0 to
1091 // mean 'empty'.
1092 const int safepoint_table_offset =
1093 code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
1094 : 0;
1095 const int handler_table_offset = base_offset + code->handler_table_offset();
1096 const int constant_pool_offset = base_offset + code->constant_pool_offset();
1097 const int code_comments_offset = base_offset + code->code_comments_offset();
1098
1099 base::RecursiveMutexGuard guard{&allocation_mutex_};
1100 base::Vector<uint8_t> dst_code_bytes =
1101 code_allocator_.AllocateForCode(this, instructions.size());
1102 memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
1103
1104 // Apply the relocation delta by iterating over the RelocInfo.
1105 intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
1106 code->raw_instruction_start();
1107 int mode_mask =
1108 RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
1109 auto jump_tables_ref =
1110 FindJumpTablesForRegionLocked(base::AddressRegionOf(dst_code_bytes));
1111 Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
1112 Address constant_pool_start = dst_code_addr + constant_pool_offset;
1113 RelocIterator orig_it(*code, mode_mask);
1114 for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
1115 constant_pool_start, mode_mask);
1116 !it.done(); it.next(), orig_it.next()) {
1117 RelocInfo::Mode mode = it.rinfo()->rmode();
1118 if (RelocInfo::IsWasmStubCall(mode)) {
1119 uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
1120 DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
1121 Address entry = GetNearRuntimeStubEntry(
1122 static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
1123 it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
1124 } else {
1125 it.rinfo()->apply(delta);
1126 }
1127 }
1128
1129 // Flush the i-cache after relocation.
1130 FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
1131
1132 std::unique_ptr<WasmCode> new_code{
1133 new WasmCode{this, // native_module
1134 kAnonymousFuncIndex, // index
1135 dst_code_bytes, // instructions
1136 stack_slots, // stack_slots
1137 0, // tagged_parameter_slots
1138 safepoint_table_offset, // safepoint_table_offset
1139 handler_table_offset, // handler_table_offset
1140 constant_pool_offset, // constant_pool_offset
1141 code_comments_offset, // code_comments_offset
1142 instructions.length(), // unpadded_binary_size
1143 {}, // protected_instructions
1144 reloc_info.as_vector(), // reloc_info
1145 source_pos.as_vector(), // source positions
1146 WasmCode::kWasmFunction, // kind
1147 ExecutionTier::kNone, // tier
1148 kNoDebugging}}; // for_debugging
1149 new_code->MaybePrint();
1150 new_code->Validate();
1151
1152 return PublishCodeLocked(std::move(new_code));
1153 }
1154
1155 void NativeModule::UseLazyStub(uint32_t func_index) {
1156 DCHECK_LE(module_->num_imported_functions, func_index);
1157 DCHECK_LT(func_index,
1158 module_->num_imported_functions + module_->num_declared_functions);
1159 // Avoid opening a new write scope per function. The caller should hold the
1160 // scope instead.
1161 DCHECK(CodeSpaceWriteScope::IsInScope());
1162
1163 base::RecursiveMutexGuard guard(&allocation_mutex_);
1164 if (!lazy_compile_table_) {
1165 uint32_t num_slots = module_->num_declared_functions;
1166 WasmCodeRefScope code_ref_scope;
1167 DCHECK_EQ(1, code_space_data_.size());
1168 base::AddressRegion single_code_space_region = code_space_data_[0].region;
1169 lazy_compile_table_ = CreateEmptyJumpTableInRegionLocked(
1170 JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
1171 single_code_space_region);
1172 JumpTableAssembler::GenerateLazyCompileTable(
1173 lazy_compile_table_->instruction_start(), num_slots,
1174 module_->num_imported_functions,
1175 GetNearRuntimeStubEntry(
1176 WasmCode::kWasmCompileLazy,
1177 FindJumpTablesForRegionLocked(
1178 base::AddressRegionOf(lazy_compile_table_->instructions()))));
1179 }
1180
1181 // Add jump table entry for jump to the lazy compile stub.
1182 uint32_t slot_index = declared_function_index(module(), func_index);
1183 DCHECK_NULL(code_table_[slot_index]);
1184 Address lazy_compile_target =
1185 lazy_compile_table_->instruction_start() +
1186 JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
1187 PatchJumpTablesLocked(slot_index, lazy_compile_target);
1188 }
1189
1190 std::unique_ptr<WasmCode> NativeModule::AddCode(
1191 int index, const CodeDesc& desc, int stack_slots,
1192 uint32_t tagged_parameter_slots,
1193 base::Vector<const byte> protected_instructions_data,
1194 base::Vector<const byte> source_position_table, WasmCode::Kind kind,
1195 ExecutionTier tier, ForDebugging for_debugging) {
1196 base::Vector<byte> code_space;
1197 NativeModule::JumpTablesRef jump_table_ref;
1198 {
1199 base::RecursiveMutexGuard guard{&allocation_mutex_};
1200 code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
1201 jump_table_ref =
1202 FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
1203 }
1204 return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
1205 protected_instructions_data,
1206 source_position_table, kind, tier, for_debugging,
1207 code_space, jump_table_ref);
1208 }
1209
1210 std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
1211 int index, const CodeDesc& desc, int stack_slots,
1212 uint32_t tagged_parameter_slots,
1213 base::Vector<const byte> protected_instructions_data,
1214 base::Vector<const byte> source_position_table, WasmCode::Kind kind,
1215 ExecutionTier tier, ForDebugging for_debugging,
1216 base::Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
1217 base::Vector<byte> reloc_info{
1218 desc.buffer + desc.buffer_size - desc.reloc_size,
1219 static_cast<size_t>(desc.reloc_size)};
1220 UpdateCodeSize(desc.instr_size, tier, for_debugging);
1221
1222 // TODO(jgruber,v8:8758): Remove this translation. It exists only because
1223 // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
1224 // 'empty'.
1225 const int safepoint_table_offset =
1226 desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
1227 const int handler_table_offset = desc.handler_table_offset;
1228 const int constant_pool_offset = desc.constant_pool_offset;
1229 const int code_comments_offset = desc.code_comments_offset;
1230 const int instr_size = desc.instr_size;
1231
1232 memcpy(dst_code_bytes.begin(), desc.buffer,
1233 static_cast<size_t>(desc.instr_size));
1234
1235 // Apply the relocation delta by iterating over the RelocInfo.
1236 intptr_t delta = dst_code_bytes.begin() - desc.buffer;
1237 int mode_mask = RelocInfo::kApplyMask |
1238 RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
1239 RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
1240 Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
1241 Address constant_pool_start = code_start + constant_pool_offset;
1242 for (RelocIterator it(dst_code_bytes, reloc_info, constant_pool_start,
1243 mode_mask);
1244 !it.done(); it.next()) {
1245 RelocInfo::Mode mode = it.rinfo()->rmode();
1246 if (RelocInfo::IsWasmCall(mode)) {
1247 uint32_t call_tag = it.rinfo()->wasm_call_tag();
1248 Address target = GetNearCallTargetForFunction(call_tag, jump_tables);
1249 it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
1250 } else if (RelocInfo::IsWasmStubCall(mode)) {
1251 uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
1252 DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
1253 Address entry = GetNearRuntimeStubEntry(
1254 static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables);
1255 it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
1256 } else {
1257 it.rinfo()->apply(delta);
1258 }
1259 }
1260
1261 // Flush the i-cache after relocation.
1262 FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
1263
1264 // Liftoff code will not be relocated or serialized, thus do not store any
1265 // relocation information.
1266 if (tier == ExecutionTier::kLiftoff) reloc_info = {};
1267
1268 std::unique_ptr<WasmCode> code{new WasmCode{
1269 this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
1270 safepoint_table_offset, handler_table_offset, constant_pool_offset,
1271 code_comments_offset, instr_size, protected_instructions_data, reloc_info,
1272 source_position_table, kind, tier, for_debugging}};
1273
1274 code->MaybePrint();
1275 code->Validate();
1276
1277 return code;
1278 }
1279
1280 WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
1281 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
1282 "wasm.PublishCode");
1283 base::RecursiveMutexGuard lock(&allocation_mutex_);
1284 CodeSpaceWriteScope code_space_write_scope(this);
1285 return PublishCodeLocked(std::move(code));
1286 }
1287
1288 std::vector<WasmCode*> NativeModule::PublishCode(
1289 base::Vector<std::unique_ptr<WasmCode>> codes) {
1290 // Publishing often happens in a loop, so the caller should hold the
1291 // {CodeSpaceWriteScope} outside of such a loop.
1292 DCHECK(CodeSpaceWriteScope::IsInScope());
1293 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
1294 "wasm.PublishCode", "number", codes.size());
1295 std::vector<WasmCode*> published_code;
1296 published_code.reserve(codes.size());
1297 base::RecursiveMutexGuard lock(&allocation_mutex_);
1298 // The published code is put into the top-most surrounding {WasmCodeRefScope}.
1299 for (auto& code : codes) {
1300 published_code.push_back(PublishCodeLocked(std::move(code)));
1301 }
1302 return published_code;
1303 }
1304
1305 WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
1306 switch (result.kind) {
1307 case WasmCompilationResult::kWasmToJsWrapper:
1308 return WasmCode::Kind::kWasmToJsWrapper;
1309 case WasmCompilationResult::kFunction:
1310 return WasmCode::Kind::kWasmFunction;
1311 default:
1312 UNREACHABLE();
1313 }
1314 }
1315
1316 WasmCode* NativeModule::PublishCodeLocked(
1317 std::unique_ptr<WasmCode> owned_code) {
1318 allocation_mutex_.AssertHeld();
1319
1320 WasmCode* code = owned_code.get();
1321 new_owned_code_.emplace_back(std::move(owned_code));
1322
1323 // Add the code to the surrounding code ref scope, so the returned pointer is
1324 // guaranteed to be valid.
1325 WasmCodeRefScope::AddRef(code);
1326
1327 if (code->index() < static_cast<int>(module_->num_imported_functions)) {
1328 return code;
1329 }
1330
1331 DCHECK_LT(code->index(), num_functions());
1332
1333 code->RegisterTrapHandlerData();
1334
1335 // Put the code in the debugging cache, if needed.
1336 if (V8_UNLIKELY(cached_code_)) InsertToCodeCache(code);
1337
1338 // Assume an order of execution tiers that represents the quality of their
1339 // generated code.
1340 static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
1341 ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
1342 "Assume an order on execution tiers");
1343
1344 uint32_t slot_idx = declared_function_index(module(), code->index());
1345 WasmCode* prior_code = code_table_[slot_idx];
1346 // If we are tiered down, install all debugging code (except for stepping
1347 // code, which is only used for a single frame and never installed in the
1348 // code table or jump table). Otherwise, install code if it was compiled
1349 // with a higher tier.
1350 static_assert(
1351 kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
1352 "for_debugging is ordered");
1353 const bool update_code_table =
1354 // Never install stepping code.
1355 code->for_debugging() != kForStepping &&
1356 (!prior_code ||
1357 (tiering_state_ == kTieredDown
1358 // Tiered down: Install breakpoints over normal debug code.
1359 ? prior_code->for_debugging() <= code->for_debugging()
1360 // Tiered up: Install if the tier is higher than before or we
1361 // replace debugging code with non-debugging code.
1362 : (prior_code->tier() < code->tier() ||
1363 (prior_code->for_debugging() && !code->for_debugging()))));
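  // Example: when tiered up, newly published TurboFan code replaces installed
  // Liftoff code (higher tier), whereas publishing Liftoff code while TurboFan
  // code is already installed leaves the code table unchanged.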
1364 if (update_code_table) {
1365 code_table_[slot_idx] = code;
1366 if (prior_code) {
1367 WasmCodeRefScope::AddRef(prior_code);
1368 // The code is added to the current {WasmCodeRefScope}, hence the ref
1369 // count cannot drop to zero here.
1370 prior_code->DecRefOnLiveCode();
1371 }
1372
1373 PatchJumpTablesLocked(slot_idx, code->instruction_start());
1374 } else {
1375 // The code table does not hold a reference to the code, hence decrement
1376 // the initial ref count of 1. The code was added to the
1377 // {WasmCodeRefScope} though, so it cannot die here.
1378 code->DecRefOnLiveCode();
1379 }
1380
1381 return code;
1382 }
1383
1384 void NativeModule::ReinstallDebugCode(WasmCode* code) {
1385 base::RecursiveMutexGuard lock(&allocation_mutex_);
1386
1387 DCHECK_EQ(this, code->native_module());
1388 DCHECK_EQ(kWithBreakpoints, code->for_debugging());
1389 DCHECK(!code->IsAnonymous());
1390 DCHECK_LE(module_->num_imported_functions, code->index());
1391 DCHECK_LT(code->index(), num_functions());
1392
1393 // If the module is tiered up by now, do not reinstall debug code.
1394 if (tiering_state_ != kTieredDown) return;
1395
1396 uint32_t slot_idx = declared_function_index(module(), code->index());
1397 if (WasmCode* prior_code = code_table_[slot_idx]) {
1398 WasmCodeRefScope::AddRef(prior_code);
1399 // The code is added to the current {WasmCodeRefScope}, hence the ref
1400 // count cannot drop to zero here.
1401 prior_code->DecRefOnLiveCode();
1402 }
1403 code_table_[slot_idx] = code;
1404 code->IncRef();
1405
1406 CodeSpaceWriteScope code_space_write_scope(this);
1407 PatchJumpTablesLocked(slot_idx, code->instruction_start());
1408 }
1409
1410 std::pair<base::Vector<uint8_t>, NativeModule::JumpTablesRef>
1411 NativeModule::AllocateForDeserializedCode(size_t total_code_size) {
1412 base::RecursiveMutexGuard guard{&allocation_mutex_};
1413 base::Vector<uint8_t> code_space =
1414 code_allocator_.AllocateForCode(this, total_code_size);
1415 auto jump_tables =
1416 FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
1417 return {code_space, jump_tables};
1418 }
1419
1420 std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
1421 int index, base::Vector<byte> instructions, int stack_slots,
1422 uint32_t tagged_parameter_slots, int safepoint_table_offset,
1423 int handler_table_offset, int constant_pool_offset,
1424 int code_comments_offset, int unpadded_binary_size,
1425 base::Vector<const byte> protected_instructions_data,
1426 base::Vector<const byte> reloc_info,
1427 base::Vector<const byte> source_position_table, WasmCode::Kind kind,
1428 ExecutionTier tier) {
1429 UpdateCodeSize(instructions.size(), tier, kNoDebugging);
1430
1431 return std::unique_ptr<WasmCode>{new WasmCode{
1432 this, index, instructions, stack_slots, tagged_parameter_slots,
1433 safepoint_table_offset, handler_table_offset, constant_pool_offset,
1434 code_comments_offset, unpadded_binary_size, protected_instructions_data,
1435 reloc_info, source_position_table, kind, tier, kNoDebugging}};
1436 }
1437
1438 std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
1439 base::RecursiveMutexGuard lock(&allocation_mutex_);
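  // Add every non-null entry to the current {WasmCodeRefScope}, so the raw
  // pointers in the returned snapshot stay valid while that scope is open.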
1440 WasmCode** start = code_table_.get();
1441 WasmCode** end = start + module_->num_declared_functions;
1442 for (WasmCode* code : base::VectorOf(start, end - start)) {
1443 if (code) WasmCodeRefScope::AddRef(code);
1444 }
1445 return std::vector<WasmCode*>{start, end};
1446 }
1447
1448 std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
1449 base::RecursiveMutexGuard lock(&allocation_mutex_);
1450 if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
1451
1452 std::vector<WasmCode*> all_code(owned_code_.size());
1453 std::transform(owned_code_.begin(), owned_code_.end(), all_code.begin(),
1454 [](auto& entry) { return entry.second.get(); });
1455 std::for_each(all_code.begin(), all_code.end(), WasmCodeRefScope::AddRef);
1456 return all_code;
1457 }
1458
1459 WasmCode* NativeModule::GetCode(uint32_t index) const {
1460 base::RecursiveMutexGuard guard(&allocation_mutex_);
1461 WasmCode* code = code_table_[declared_function_index(module(), index)];
1462 if (code) WasmCodeRefScope::AddRef(code);
1463 return code;
1464 }
1465
1466 bool NativeModule::HasCode(uint32_t index) const {
1467 base::RecursiveMutexGuard guard(&allocation_mutex_);
1468 return code_table_[declared_function_index(module(), index)] != nullptr;
1469 }
1470
1471 bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
1472 base::RecursiveMutexGuard guard(&allocation_mutex_);
1473 return code_table_[declared_function_index(module(), index)] != nullptr &&
1474 code_table_[declared_function_index(module(), index)]->tier() == tier;
1475 }
1476
1477 void NativeModule::SetWasmSourceMap(
1478 std::unique_ptr<WasmModuleSourceMap> source_map) {
1479 source_map_ = std::move(source_map);
1480 }
1481
1482 WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
1483 return source_map_.get();
1484 }
1485
1486 WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
1487 int jump_table_size, base::AddressRegion region) {
1488 allocation_mutex_.AssertHeld();
1489 // Only call this if we really need a jump table.
1490 DCHECK_LT(0, jump_table_size);
1491 CodeSpaceWriteScope code_space_write_scope(this);
1492 base::Vector<uint8_t> code_space =
1493 code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
1494 DCHECK(!code_space.empty());
1495 UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
1496 ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
1497 std::unique_ptr<WasmCode> code{
1498 new WasmCode{this, // native_module
1499 kAnonymousFuncIndex, // index
1500 code_space, // instructions
1501 0, // stack_slots
1502 0, // tagged_parameter_slots
1503 0, // safepoint_table_offset
1504 jump_table_size, // handler_table_offset
1505 jump_table_size, // constant_pool_offset
1506 jump_table_size, // code_comments_offset
1507 jump_table_size, // unpadded_binary_size
1508 {}, // protected_instructions
1509 {}, // reloc_info
1510 {}, // source_pos
1511 WasmCode::kJumpTable, // kind
1512 ExecutionTier::kNone, // tier
1513 kNoDebugging}}; // for_debugging
1514 return PublishCodeLocked(std::move(code));
1515 }
1516
1517 void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
1518 ForDebugging for_debugging) {
1519 if (for_debugging != kNoDebugging) return;
1520 // Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
1521 // this is shared code.
1522 if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
1523 if (tier != ExecutionTier::kLiftoff) turbofan_code_size_.fetch_add(size);
1524 }
1525
1526 void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
1527 allocation_mutex_.AssertHeld();
1528
1529 for (auto& code_space_data : code_space_data_) {
1530 DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
1531 if (!code_space_data.jump_table) continue;
1532 PatchJumpTableLocked(code_space_data, slot_index, target);
1533 }
1534 }
1535
1536 void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
1537 uint32_t slot_index, Address target) {
1538 allocation_mutex_.AssertHeld();
1539
1540 DCHECK_NOT_NULL(code_space_data.jump_table);
1541 DCHECK_NOT_NULL(code_space_data.far_jump_table);
1542
1543 // Jump tables are often allocated next to each other, so we can switch
1544 // permissions on both at the same time.
1545 if (code_space_data.jump_table->instructions().end() ==
1546 code_space_data.far_jump_table->instructions().begin()) {
1547 base::Vector<uint8_t> jump_tables_space = base::VectorOf(
1548 code_space_data.jump_table->instructions().begin(),
1549 code_space_data.jump_table->instructions().size() +
1550 code_space_data.far_jump_table->instructions().size());
1551 code_allocator_.MakeWritable(AddressRegionOf(jump_tables_space));
1552 } else {
1553 code_allocator_.MakeWritable(
1554 AddressRegionOf(code_space_data.jump_table->instructions()));
1555 code_allocator_.MakeWritable(
1556 AddressRegionOf(code_space_data.far_jump_table->instructions()));
1557 }
1558
1559 DCHECK_LT(slot_index, module_->num_declared_functions);
1560 Address jump_table_slot =
1561 code_space_data.jump_table->instruction_start() +
1562 JumpTableAssembler::JumpSlotIndexToOffset(slot_index);
1563 uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
1564 WasmCode::kRuntimeStubCount + slot_index);
1565 // Only pass the far jump table start if the far jump table actually has a
1566 // slot for this function index (i.e. does not only contain runtime stubs).
1567 bool has_far_jump_slot =
1568 far_jump_table_offset <
1569 code_space_data.far_jump_table->instructions().size();
1570 Address far_jump_table_start =
1571 code_space_data.far_jump_table->instruction_start();
1572 Address far_jump_table_slot =
1573 has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
1574 : kNullAddress;
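  // Example: if the far jump table of this code space only contains the
  // runtime stubs, there is no slot at {far_jump_table_offset}, so
  // {far_jump_table_slot} stays {kNullAddress} to signal that only the near
  // jump table slot can be patched.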
1575 JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot,
1576 target);
1577 }
1578
1579 void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
1580 allocation_mutex_.AssertHeld();
1581
1582 // Each code space must be at least twice as large as the overhead per code
1583 // space. Otherwise, we are wasting too much memory.
1584 DCHECK_GE(region.size(),
1585 2 * OverheadPerCodeSpace(module()->num_declared_functions));
1586
1587 CodeSpaceWriteScope code_space_write_scope(this);
1588 #if defined(V8_OS_WIN64)
1589 // On some platforms, specifically Win64, we need to reserve some pages at
1590 // the beginning of an executable space.
1591 // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
1592 // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
1593 // for details.
1594 if (WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
1595 size_t size = Heap::GetCodeRangeReservedAreaSize();
1596 DCHECK_LT(0, size);
1597 base::Vector<byte> padding =
1598 code_allocator_.AllocateForCodeInRegion(this, size, region);
1599 CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
1600 win64_unwindinfo::RegisterNonABICompliantCodeRange(
1601 reinterpret_cast<void*>(region.begin()), region.size());
1602 }
1603 #endif // V8_OS_WIN64
1604
1605 WasmCodeRefScope code_ref_scope;
1606 WasmCode* jump_table = nullptr;
1607 WasmCode* far_jump_table = nullptr;
1608 const uint32_t num_wasm_functions = module_->num_declared_functions;
1609 const bool is_first_code_space = code_space_data_.empty();
1610 // We always need a far jump table, because it contains the runtime stubs.
1611 const bool needs_far_jump_table =
1612 !FindJumpTablesForRegionLocked(region).is_valid();
1613 const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
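  // In other words: if another code space's jump tables are already reachable
  // from {region}, reuse them and create no tables here; otherwise create a
  // far jump table and, if there are declared functions, also a (near) jump
  // table for this region.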
1614
1615 if (needs_jump_table) {
1616 jump_table = CreateEmptyJumpTableInRegionLocked(
1617 JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
1618 CHECK(region.contains(jump_table->instruction_start()));
1619 }
1620
1621 if (needs_far_jump_table) {
1622 int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
1623 far_jump_table = CreateEmptyJumpTableInRegionLocked(
1624 JumpTableAssembler::SizeForNumberOfFarJumpSlots(
1625 WasmCode::kRuntimeStubCount,
1626 NumWasmFunctionsInFarJumpTable(num_function_slots)),
1627 region);
1628 CHECK(region.contains(far_jump_table->instruction_start()));
1629 EmbeddedData embedded_data = EmbeddedData::FromBlob();
1630 #define RUNTIME_STUB(Name) Builtin::k##Name,
1631 #define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
1632 Builtin stub_names[WasmCode::kRuntimeStubCount] = {
1633 WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
1634 #undef RUNTIME_STUB
1635 #undef RUNTIME_STUB_TRAP
1636 STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
1637 Address builtin_addresses[WasmCode::kRuntimeStubCount];
1638 for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
1639 Builtin builtin = stub_names[i];
1640 builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
1641 }
1642 JumpTableAssembler::GenerateFarJumpTable(
1643 far_jump_table->instruction_start(), builtin_addresses,
1644 WasmCode::kRuntimeStubCount, num_function_slots);
1645 }
1646
1647 if (is_first_code_space) {
1648 // These fields can be updated and accessed without locks, since the
1649 // addition of the first code space happens during initialization of the
1650 // {NativeModule}, where no concurrent accesses are possible.
1651 main_jump_table_ = jump_table;
1652 main_far_jump_table_ = far_jump_table;
1653 }
1654
1655 code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
1656
1657 if (jump_table && !is_first_code_space) {
1658 // Patch the new jump table(s) with existing functions. If this is the first
1659 // code space, there cannot be any functions that have been compiled yet.
1660 const CodeSpaceData& new_code_space_data = code_space_data_.back();
1661 for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
1662 ++slot_index) {
1663 if (code_table_[slot_index]) {
1664 PatchJumpTableLocked(new_code_space_data, slot_index,
1665 code_table_[slot_index]->instruction_start());
1666 } else if (lazy_compile_table_) {
1667 Address lazy_compile_target =
1668 lazy_compile_table_->instruction_start() +
1669 JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
1670 PatchJumpTableLocked(new_code_space_data, slot_index,
1671 lazy_compile_target);
1672 }
1673 }
1674 }
1675 }
1676
1677 namespace {
1678 class NativeModuleWireBytesStorage final : public WireBytesStorage {
1679 public:
1680 explicit NativeModuleWireBytesStorage(
1681 std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes)
1682 : wire_bytes_(std::move(wire_bytes)) {}
1683
1684 base::Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
1685 return std::atomic_load(&wire_bytes_)
1686 ->as_vector()
1687 .SubVector(ref.offset(), ref.end_offset());
1688 }
1689
1690 base::Optional<ModuleWireBytes> GetModuleBytes() const final {
1691 return base::Optional<ModuleWireBytes>(
1692 std::atomic_load(&wire_bytes_)->as_vector());
1693 }
1694
1695 private:
1696 const std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
1697 };
1698 } // namespace
1699
1700 void NativeModule::SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes) {
1701 auto shared_wire_bytes =
1702 std::make_shared<base::OwnedVector<const uint8_t>>(std::move(wire_bytes));
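  // {wire_bytes_} may be read concurrently from other threads, so publish the
  // new vector with an atomic store.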
1703 std::atomic_store(&wire_bytes_, shared_wire_bytes);
1704 if (!shared_wire_bytes->empty()) {
1705 compilation_state_->SetWireBytesStorage(
1706 std::make_shared<NativeModuleWireBytesStorage>(
1707 std::move(shared_wire_bytes)));
1708 }
1709 }
1710
1711 void NativeModule::UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier) {
1712 if (tier == WasmCompilationUnit::GetBaselineExecutionTier(this->module())) {
1713 if (!compilation_state_->baseline_compilation_finished()) {
1714 baseline_compilation_cpu_duration_.fetch_add(cpu_duration,
1715 std::memory_order_relaxed);
1716 }
1717 } else if (tier == ExecutionTier::kTurbofan) {
1718 if (!compilation_state_->top_tier_compilation_finished()) {
1719 tier_up_cpu_duration_.fetch_add(cpu_duration, std::memory_order_relaxed);
1720 }
1721 }
1722 }
1723
1724 void NativeModule::TransferNewOwnedCodeLocked() const {
1725 allocation_mutex_.AssertHeld();
1726 DCHECK(!new_owned_code_.empty());
1727 // Sort the {new_owned_code_} vector in descending order, such that the position of the
1728 // previously inserted element can be used as a hint for the next element. If
1729 // elements in {new_owned_code_} are adjacent, this will guarantee
1730 // constant-time insertion into the map.
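  // Example (hypothetical addresses): code objects starting at 0x300, 0x100
  // and 0x200 are sorted to [0x300, 0x200, 0x100]; each {emplace_hint} below
  // then inserts directly before the previously inserted map entry.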
1731 std::sort(new_owned_code_.begin(), new_owned_code_.end(),
1732 [](const std::unique_ptr<WasmCode>& a,
1733 const std::unique_ptr<WasmCode>& b) {
1734 return a->instruction_start() > b->instruction_start();
1735 });
1736 auto insertion_hint = owned_code_.end();
1737 for (auto& code : new_owned_code_) {
1738 DCHECK_EQ(0, owned_code_.count(code->instruction_start()));
1739 // Check plausibility of the insertion hint.
1740 DCHECK(insertion_hint == owned_code_.end() ||
1741 insertion_hint->first > code->instruction_start());
1742 insertion_hint = owned_code_.emplace_hint(
1743 insertion_hint, code->instruction_start(), std::move(code));
1744 }
1745 new_owned_code_.clear();
1746 }
1747
1748 void NativeModule::InsertToCodeCache(WasmCode* code) {
1749 allocation_mutex_.AssertHeld();
1750 DCHECK_NOT_NULL(cached_code_);
1751 if (code->IsAnonymous()) return;
1752 // Only cache Liftoff debugging code or TurboFan code (no breakpoints or
1753 // stepping).
1754 if (code->tier() == ExecutionTier::kLiftoff &&
1755 code->for_debugging() != kForDebugging) {
1756 return;
1757 }
1758 auto key = std::make_pair(code->tier(), code->index());
1759 if (cached_code_->insert(std::make_pair(key, code)).second) {
1760 code->IncRef();
1761 }
1762 }
1763
1764 WasmCode* NativeModule::Lookup(Address pc) const {
1765 base::RecursiveMutexGuard lock(&allocation_mutex_);
1766 if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
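  // {owned_code_} is keyed by instruction start address; the entry with the
  // largest start address not greater than {pc} is the only candidate that
  // could contain {pc}.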
1767 auto iter = owned_code_.upper_bound(pc);
1768 if (iter == owned_code_.begin()) return nullptr;
1769 --iter;
1770 WasmCode* candidate = iter->second.get();
1771 DCHECK_EQ(candidate->instruction_start(), iter->first);
1772 if (!candidate->contains(pc)) return nullptr;
1773 WasmCodeRefScope::AddRef(candidate);
1774 return candidate;
1775 }
1776
1777 uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
1778 uint32_t slot_idx = declared_function_index(module(), func_index);
1779 return JumpTableAssembler::JumpSlotIndexToOffset(slot_idx);
1780 }
1781
1782 Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
1783 // Return the jump table slot for that function index.
1784 DCHECK_NOT_NULL(main_jump_table_);
1785 uint32_t slot_offset = GetJumpTableOffset(func_index);
1786 DCHECK_LT(slot_offset, main_jump_table_->instructions().size());
1787 return main_jump_table_->instruction_start() + slot_offset;
1788 }
1789
1790 NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
1791 base::AddressRegion code_region) const {
1792 allocation_mutex_.AssertHeld();
1793 auto jump_table_usable = [code_region](const WasmCode* jump_table) {
1794 Address table_start = jump_table->instruction_start();
1795 Address table_end = table_start + jump_table->instructions().size();
1796 // Compute the maximum distance from anywhere in the code region to anywhere
1797 // in the jump table, avoiding any underflow.
1798 size_t max_distance = std::max(
1799 code_region.end() > table_start ? code_region.end() - table_start : 0,
1800 table_end > code_region.begin() ? table_end - code_region.begin() : 0);
1801 // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
1802 // every call or jump will target an address *within* the region, but never
1803 // exactly the end of the region. So all occurring offsets are actually
1804 // smaller than max_distance.
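    // Hypothetical example: for a code region [0x1000, 0x2000) and a jump
    // table at [0x5000, 0x5100), {max_distance} is 0x5100 - 0x1000 = 0x4100.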
1805 return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
1806 };
1807
1808 for (auto& code_space_data : code_space_data_) {
1809 DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
1810 if (!code_space_data.far_jump_table) continue;
1811 // Only return these jump tables if they are reachable from the whole
1812 // {code_region}.
1813 if (kNeedsFarJumpsBetweenCodeSpaces &&
1814 (!jump_table_usable(code_space_data.far_jump_table) ||
1815 (code_space_data.jump_table &&
1816 !jump_table_usable(code_space_data.jump_table)))) {
1817 continue;
1818 }
1819 return {code_space_data.jump_table
1820 ? code_space_data.jump_table->instruction_start()
1821 : kNullAddress,
1822 code_space_data.far_jump_table->instruction_start()};
1823 }
1824 return {};
1825 }
1826
1827 Address NativeModule::GetNearCallTargetForFunction(
1828 uint32_t func_index, const JumpTablesRef& jump_tables) const {
1829 DCHECK(jump_tables.is_valid());
1830 uint32_t slot_offset = GetJumpTableOffset(func_index);
1831 return jump_tables.jump_table_start + slot_offset;
1832 }
1833
1834 Address NativeModule::GetNearRuntimeStubEntry(
1835 WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
1836 DCHECK(jump_tables.is_valid());
1837 auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
1838 return jump_tables.far_jump_table_start + offset;
1839 }
1840
1841 uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
1842 Address slot_address) const {
1843 WasmCodeRefScope code_refs;
1844 WasmCode* code = Lookup(slot_address);
1845 DCHECK_NOT_NULL(code);
1846 DCHECK_EQ(WasmCode::kJumpTable, code->kind());
1847 uint32_t slot_offset =
1848 static_cast<uint32_t>(slot_address - code->instruction_start());
1849 uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
1850 DCHECK_LT(slot_idx, module_->num_declared_functions);
1851 DCHECK_EQ(slot_address,
1852 code->instruction_start() +
1853 JumpTableAssembler::JumpSlotIndexToOffset(slot_idx));
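  // Jump table slots are indexed by declared function index; adding the number
  // of imports converts back to the module-wide function index space.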
1854 return module_->num_imported_functions + slot_idx;
1855 }
1856
1857 WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
1858 base::RecursiveMutexGuard guard(&allocation_mutex_);
1859
1860 for (auto& code_space_data : code_space_data_) {
1861 if (code_space_data.far_jump_table != nullptr &&
1862 code_space_data.far_jump_table->contains(target)) {
1863 uint32_t offset = static_cast<uint32_t>(
1864 target - code_space_data.far_jump_table->instruction_start());
1865 uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset);
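      // Accept {target} only if it maps exactly onto one of the runtime stub
      // slots; offsets of function slots or offsets inside a slot are rejected.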
1866 if (index >= WasmCode::kRuntimeStubCount) continue;
1867 if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) {
1868 continue;
1869 }
1870 return static_cast<WasmCode::RuntimeStubId>(index);
1871 }
1872 }
1873
1874 // Invalid address.
1875 return WasmCode::kRuntimeStubCount;
1876 }
1877
1878 NativeModule::~NativeModule() {
1879 TRACE_HEAP("Deleting native module: %p\n", this);
1880 // Cancel all background compilation before resetting any field of the
1881 // NativeModule or freeing anything.
1882 compilation_state_->CancelCompilation();
1883 GetWasmEngine()->FreeNativeModule(this);
1884 // Free the import wrapper cache before releasing the {WasmCode} objects in
1885 // {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
1886 // decrease reference counts on the {WasmCode} objects.
1887 import_wrapper_cache_.reset();
1888 }
1889
1890 WasmCodeManager::WasmCodeManager()
1891 : max_committed_code_space_(FLAG_wasm_max_code_space * MB),
1892 critical_committed_code_space_(max_committed_code_space_ / 2),
1893 memory_protection_key_(AllocateMemoryProtectionKey()) {}
1894
1895 WasmCodeManager::~WasmCodeManager() {
1896 // No more committed code space.
1897 DCHECK_EQ(0, total_committed_code_space_.load());
1898
1899 FreeMemoryProtectionKey(memory_protection_key_);
1900 }
1901
1902 #if defined(V8_OS_WIN64)
1903 // static
1904 bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() {
1905 return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
1906 FLAG_win64_unwinding_info;
1907 }
1908 #endif // V8_OS_WIN64
1909
1910 void WasmCodeManager::Commit(base::AddressRegion region) {
1911 // TODO(v8:8462): Remove eager commit once perf supports remapping.
1912 if (FLAG_perf_prof) return;
1913 DCHECK(IsAligned(region.begin(), CommitPageSize()));
1914 DCHECK(IsAligned(region.size(), CommitPageSize()));
1915 // Reserve the size. Use a CAS loop to avoid overflow on
1916 // {total_committed_code_space_}.
1917 size_t old_value = total_committed_code_space_.load();
1918 while (true) {
1919 DCHECK_GE(max_committed_code_space_, old_value);
1920 if (region.size() > max_committed_code_space_ - old_value) {
1921 V8::FatalProcessOutOfMemory(
1922 nullptr,
1923 "WasmCodeManager::Commit: Exceeding maximum wasm code space");
1924 UNREACHABLE();
1925 }
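    // On CAS failure, {compare_exchange_weak} reloads the current value into
    // {old_value}, so the limit check above is repeated with fresh data on the
    // next iteration.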
1926 if (total_committed_code_space_.compare_exchange_weak(
1927 old_value, old_value + region.size())) {
1928 break;
1929 }
1930 }
1931 // Even when we employ W^X with FLAG_wasm_write_protect_code_memory == true,
1932 // code pages need to be initially allocated with RWX permission because of
1933 // concurrent compilation/execution. For this reason there is no distinction
1934 // here based on FLAG_wasm_write_protect_code_memory.
1935 // TODO(dlehmann): This allocates initially as writable and executable, and
1936 // as such is not safe-by-default. In particular, if
1937 // {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
1938 // because no {CodeSpaceWriteScope} is created), the writable permission is
1939 // never withdrawn.
1940 // One potential fix is to allocate initially with kReadExecute only, which
1941 // forces all compilation threads to add the missing {CodeSpaceWriteScope}s
1942 // before modification; and/or adding DCHECKs that {CodeSpaceWriteScope} is
1943 // open when calling this method.
1944 PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
1945
1946 bool success;
1947 if (MemoryProtectionKeysEnabled()) {
1948 TRACE_HEAP(
1949 "Setting rwx permissions and memory protection key %d for 0x%" PRIxPTR
1950 ":0x%" PRIxPTR "\n",
1951 memory_protection_key_, region.begin(), region.end());
1952 success = SetPermissionsAndMemoryProtectionKey(
1953 GetPlatformPageAllocator(), region, permission, memory_protection_key_);
1954 } else {
1955 TRACE_HEAP("Setting rwx permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
1956 region.begin(), region.end());
1957 success = SetPermissions(GetPlatformPageAllocator(), region.begin(),
1958 region.size(), permission);
1959 }
1960
1961 if (V8_UNLIKELY(!success)) {
1962 V8::FatalProcessOutOfMemory(
1963 nullptr,
1964 "WasmCodeManager::Commit: Cannot make pre-reserved region writable");
1965 UNREACHABLE();
1966 }
1967 }
1968
1969 void WasmCodeManager::Decommit(base::AddressRegion region) {
1970 // TODO(v8:8462): Remove this once perf supports remapping.
1971 if (FLAG_perf_prof) return;
1972 PageAllocator* allocator = GetPlatformPageAllocator();
1973 DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
1974 DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
1975 size_t old_committed = total_committed_code_space_.fetch_sub(region.size());
1976 DCHECK_LE(region.size(), old_committed);
1977 USE(old_committed);
1978 TRACE_HEAP("Decommitting system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
1979 region.begin(), region.end());
1980 CHECK(allocator->DecommitPages(reinterpret_cast<void*>(region.begin()),
1981 region.size()));
1982 }
1983
1984 void WasmCodeManager::AssignRange(base::AddressRegion region,
1985 NativeModule* native_module) {
1986 base::MutexGuard lock(&native_modules_mutex_);
1987 lookup_map_.insert(std::make_pair(
1988 region.begin(), std::make_pair(region.end(), native_module)));
1989 }
1990
1991 VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
1992 v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
1993 DCHECK_GT(size, 0);
1994 size_t allocate_page_size = page_allocator->AllocatePageSize();
1995 size = RoundUp(size, allocate_page_size);
1996 if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
1997
1998 // When we start exposing Wasm in jitless mode, the jitless flag
1999 // will have to determine whether we set kMapAsJittable or not.
2000 DCHECK(!FLAG_jitless);
2001 VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
2002 VirtualMemory::kMapAsJittable);
2003 if (!mem.IsReserved()) return {};
2004 TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
2005 mem.end(), mem.size());
2006
2007 // TODO(v8:8462): Remove eager commit once perf supports remapping.
2008 if (FLAG_perf_prof) {
2009 SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
2010 PageAllocator::kReadWriteExecute);
2011 }
2012 return mem;
2013 }
2014
2015 namespace {
2016 // The numbers here are rough estimates, used to calculate the size of the
2017 // initial code reservation and for estimating the amount of external memory
2018 // reported to the GC.
2019 // They do not need to be accurate. Choosing them too small will result in
2020 // separate code spaces being allocated (compile time and runtime overhead),
2021 // choosing them too large results in over-reservation (virtual address space
2022 // only).
2023 // When in doubt, choose the numbers slightly too large, because over-reservation is
2024 // less critical than multiple separate code spaces (especially on 64-bit).
2025 // Numbers can be determined by running benchmarks with
2026 // --trace-wasm-compilation-times, and piping the output through
2027 // tools/wasm/code-size-factors.py.
2028 #if V8_TARGET_ARCH_X64
2029 constexpr size_t kTurbofanFunctionOverhead = 24;
2030 constexpr size_t kTurbofanCodeSizeMultiplier = 3;
2031 constexpr size_t kLiftoffFunctionOverhead = 56;
2032 constexpr size_t kLiftoffCodeSizeMultiplier = 4;
2033 constexpr size_t kImportSize = 640;
2034 #elif V8_TARGET_ARCH_IA32
2035 constexpr size_t kTurbofanFunctionOverhead = 20;
2036 constexpr size_t kTurbofanCodeSizeMultiplier = 4;
2037 constexpr size_t kLiftoffFunctionOverhead = 48;
2038 constexpr size_t kLiftoffCodeSizeMultiplier = 5;
2039 constexpr size_t kImportSize = 320;
2040 #elif V8_TARGET_ARCH_ARM
2041 constexpr size_t kTurbofanFunctionOverhead = 44;
2042 constexpr size_t kTurbofanCodeSizeMultiplier = 4;
2043 constexpr size_t kLiftoffFunctionOverhead = 96;
2044 constexpr size_t kLiftoffCodeSizeMultiplier = 5;
2045 constexpr size_t kImportSize = 550;
2046 #elif V8_TARGET_ARCH_ARM64
2047 constexpr size_t kTurbofanFunctionOverhead = 40;
2048 constexpr size_t kTurbofanCodeSizeMultiplier = 3;
2049 constexpr size_t kLiftoffFunctionOverhead = 68;
2050 constexpr size_t kLiftoffCodeSizeMultiplier = 4;
2051 constexpr size_t kImportSize = 750;
2052 #else
2053 // Other platforms should add their own estimates for best performance. Numbers
2054 // below are the maximum of other architectures.
2055 constexpr size_t kTurbofanFunctionOverhead = 44;
2056 constexpr size_t kTurbofanCodeSizeMultiplier = 4;
2057 constexpr size_t kLiftoffFunctionOverhead = 96;
2058 constexpr size_t kLiftoffCodeSizeMultiplier = 5;
2059 constexpr size_t kImportSize = 750;
2060 #endif
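// Hypothetical x64 example: one declared function with a 100-byte body and no
// imports is estimated at roughly 56 + kCodeAlignment / 2 + 4 * 100 bytes of
// Liftoff code plus 24 + kCodeAlignment / 2 + 3 * 100 bytes of TurboFan code
// (see {EstimateNativeModuleCodeSize} below).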
2061 } // namespace
2062
2063 // static
2064 size_t WasmCodeManager::EstimateLiftoffCodeSize(int body_size) {
2065 return kLiftoffFunctionOverhead + kCodeAlignment / 2 +
2066 body_size * kLiftoffCodeSizeMultiplier;
2067 }
2068
2069 // static
2070 size_t WasmCodeManager::EstimateNativeModuleCodeSize(
2071 const WasmModule* module, bool include_liftoff,
2072 DynamicTiering dynamic_tiering) {
2073 int num_functions = static_cast<int>(module->num_declared_functions);
2074 int num_imported_functions = static_cast<int>(module->num_imported_functions);
2075 int code_section_length = 0;
2076 if (num_functions > 0) {
2077 DCHECK_EQ(module->functions.size(), num_imported_functions + num_functions);
2078 auto* first_fn = &module->functions[module->num_imported_functions];
2079 auto* last_fn = &module->functions.back();
2080 code_section_length =
2081 static_cast<int>(last_fn->code.end_offset() - first_fn->code.offset());
2082 }
2083 return EstimateNativeModuleCodeSize(num_functions, num_imported_functions,
2084 code_section_length, include_liftoff,
2085 dynamic_tiering);
2086 }
2087
2088 // static
2089 size_t WasmCodeManager::EstimateNativeModuleCodeSize(
2090 int num_functions, int num_imported_functions, int code_section_length,
2091 bool include_liftoff, DynamicTiering dynamic_tiering) {
2092 // Note that the size for jump tables is added later, in {ReservationSize} /
2093 // {OverheadPerCodeSpace}.
2094
2095 const size_t size_of_imports = kImportSize * num_imported_functions;
2096
2097 const size_t overhead_per_function_turbofan =
2098 kTurbofanFunctionOverhead + kCodeAlignment / 2;
2099 size_t size_of_turbofan = overhead_per_function_turbofan * num_functions +
2100 kTurbofanCodeSizeMultiplier * code_section_length;
2101
2102 const size_t overhead_per_function_liftoff =
2103 kLiftoffFunctionOverhead + kCodeAlignment / 2;
2104 size_t size_of_liftoff = overhead_per_function_liftoff * num_functions +
2105 kLiftoffCodeSizeMultiplier * code_section_length;
2106
2107 if (!include_liftoff) {
2108 size_of_liftoff = 0;
2109 }
2110 // With dynamic tiering we don't expect to compile more than 25% with
2111 // TurboFan. If there is no Liftoff, though, then all code will be generated
2112 // by TurboFan.
2113 if (include_liftoff && dynamic_tiering == DynamicTiering::kEnabled) {
2114 size_of_turbofan /= 4;
2115 }
2116
2117 return size_of_imports + size_of_liftoff + size_of_turbofan;
2118 }
2119
2120 // static
2121 size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
2122 const WasmModule* module) {
2123 size_t wasm_module_estimate = EstimateStoredSize(module);
2124
2125 uint32_t num_wasm_functions = module->num_declared_functions;
2126
2127 // TODO(wasm): Include wire bytes size.
2128 size_t native_module_estimate =
2129 sizeof(NativeModule) + // NativeModule struct
2130 (sizeof(WasmCode*) * num_wasm_functions) + // code table size
2131 (sizeof(WasmCode) * num_wasm_functions); // code object size
2132
2133 size_t jump_table_size = RoundUp<kCodeAlignment>(
2134 JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
2135 size_t far_jump_table_size =
2136 RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
2137 WasmCode::kRuntimeStubCount,
2138 NumWasmFunctionsInFarJumpTable(num_wasm_functions)));
2139
2140 return wasm_module_estimate + native_module_estimate + jump_table_size +
2141 far_jump_table_size;
2142 }
2143
2144 void WasmCodeManager::SetThreadWritable(bool writable) {
2145 DCHECK(MemoryProtectionKeysEnabled());
2146
2147 MemoryProtectionKeyPermission permissions =
2148 writable ? kNoRestrictions : kDisableWrite;
2149
2150 // When switching to writable we should not already be writable. Otherwise
2151 // this points at a problem with counting writers, or with wrong
2152 // initialization (globally or per thread).
2153 DCHECK_IMPLIES(writable, !MemoryProtectionKeyWritable());
2154
2155 TRACE_HEAP("Setting memory protection key %d to writable: %d.\n",
2156 memory_protection_key_, writable);
2157 SetPermissionsForMemoryProtectionKey(memory_protection_key_, permissions);
2158 }
2159
2160 bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
2161 return memory_protection_key_ != kNoMemoryProtectionKey;
2162 }
2163
2164 bool WasmCodeManager::MemoryProtectionKeysEnabled() const {
2165 return HasMemoryProtectionKeySupport() && FLAG_wasm_memory_protection_keys;
2166 }
2167
2168 bool WasmCodeManager::MemoryProtectionKeyWritable() const {
2169 return GetMemoryProtectionKeyPermission(memory_protection_key_) ==
2170 MemoryProtectionKeyPermission::kNoRestrictions;
2171 }
2172
2173 void WasmCodeManager::InitializeMemoryProtectionKeyPermissionsIfSupported()
2174 const {
2175 if (!HasMemoryProtectionKeySupport()) return;
2176 // The default permission is {kDisableAccess}. Switch from that to
2177 // {kDisableWrite}. Leave other permissions untouched, as the thread has
2178 // already used the memory protection key in that case.
2179 if (GetMemoryProtectionKeyPermission(memory_protection_key_) ==
2180 kDisableAccess) {
2181 SetPermissionsForMemoryProtectionKey(memory_protection_key_, kDisableWrite);
2182 }
2183 }
2184
2185 std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
2186 Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
2187 std::shared_ptr<const WasmModule> module) {
2188 if (total_committed_code_space_.load() >
2189 critical_committed_code_space_.load()) {
2190 (reinterpret_cast<v8::Isolate*>(isolate))
2191 ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
2192 size_t committed = total_committed_code_space_.load();
2193 DCHECK_GE(max_committed_code_space_, committed);
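    // Raise the critical limit to halfway between the current commitment and
    // the maximum, so that the next memory pressure notification is only sent
    // after significant further growth.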
2194 critical_committed_code_space_.store(
2195 committed + (max_committed_code_space_ - committed) / 2);
2196 }
2197
2198 size_t code_vmem_size =
2199 ReservationSize(code_size_estimate, module->num_declared_functions, 0);
2200
2201 // The '--wasm-max-initial-code-space-reservation' testing flag can be used to
2202 // reduce the maximum size of the initial code space reservation (in MB).
2203 if (FLAG_wasm_max_initial_code_space_reservation > 0) {
2204 size_t flag_max_bytes =
2205 static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
2206 if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
2207 }
2208
2209 // Try up to two times; getting rid of dead JSArrayBuffer allocations might
2210 // require two GCs because the first GC may be incremental and may have
2211 // floating garbage.
2212 static constexpr int kAllocationRetries = 2;
2213 VirtualMemory code_space;
2214 for (int retries = 0;; ++retries) {
2215 code_space = TryAllocate(code_vmem_size);
2216 if (code_space.IsReserved()) break;
2217 if (retries == kAllocationRetries) {
2218 constexpr auto format = base::StaticCharVector(
2219 "NewNativeModule cannot allocate code space of %zu bytes");
2220 constexpr int kMaxMessageLength =
2221 format.size() - 3 + std::numeric_limits<size_t>::digits10;
2222 base::EmbeddedVector<char, kMaxMessageLength + 1> message;
2223 SNPrintF(message, format.begin(), code_vmem_size);
2224 V8::FatalProcessOutOfMemory(isolate, message.begin());
2225 UNREACHABLE();
2226 }
2227 // Run one GC, then try the allocation again.
2228 isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
2229 true);
2230 }
2231
2232 Address start = code_space.address();
2233 size_t size = code_space.size();
2234 Address end = code_space.end();
2235 std::shared_ptr<NativeModule> ret;
2236 DynamicTiering dynamic_tiering = isolate->IsWasmDynamicTieringEnabled()
2237 ? DynamicTiering::kEnabled
2238 : DynamicTiering::kDisabled;
2239 new NativeModule(enabled, dynamic_tiering, std::move(code_space),
2240 std::move(module), isolate->async_counters(), &ret);
2241 // The constructor initialized the shared_ptr.
2242 DCHECK_NOT_NULL(ret);
2243 TRACE_HEAP("New NativeModule %p: Mem: 0x%" PRIxPTR ",+%zu\n", ret.get(),
2244 start, size);
2245
2246 base::MutexGuard lock(&native_modules_mutex_);
2247 lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
2248 return ret;
2249 }
2250
2251 void NativeModule::SampleCodeSize(
2252 Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
2253 size_t code_size = sampling_time == kSampling
2254 ? code_allocator_.committed_code_space()
2255 : code_allocator_.generated_code_size();
2256 int code_size_mb = static_cast<int>(code_size / MB);
2257 Histogram* histogram = nullptr;
2258 switch (sampling_time) {
2259 case kAfterBaseline:
2260 histogram = counters->wasm_module_code_size_mb_after_baseline();
2261 break;
2262 case kAfterTopTier:
2263 histogram = counters->wasm_module_code_size_mb_after_top_tier();
2264 break;
2265 case kSampling: {
2266 histogram = counters->wasm_module_code_size_mb();
2267 // If this is a wasm module of >= 2MB, also sample the freed code size,
2268 // absolute and relative. Code GC does not happen on asm.js modules, and
2269 // small modules will never trigger GC anyway.
2270 size_t generated_size = code_allocator_.generated_code_size();
2271 if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
2272 size_t freed_size = code_allocator_.freed_code_size();
2273 DCHECK_LE(freed_size, generated_size);
2274 int freed_percent = static_cast<int>(100 * freed_size / generated_size);
2275 counters->wasm_module_freed_code_size_percent()->AddSample(
2276 freed_percent);
2277 }
2278 break;
2279 }
2280 }
2281 histogram->AddSample(code_size_mb);
2282 }
2283
2284 std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
2285 WasmCompilationResult result) {
2286 std::vector<std::unique_ptr<WasmCode>> code = AddCompiledCode({&result, 1});
2287 return std::move(code[0]);
2288 }
2289
2290 std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
2291 base::Vector<WasmCompilationResult> results) {
2292 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
2293 "wasm.AddCompiledCode", "num", results.size());
2294 DCHECK(!results.empty());
2295 // First, allocate code space for all the results.
2296 size_t total_code_space = 0;
2297 for (auto& result : results) {
2298 DCHECK(result.succeeded());
2299 total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
2300 if (result.result_tier == ExecutionTier::kLiftoff) {
2301 int index = result.func_index;
2302 int* slots = &module()->functions[index].feedback_slots;
2303 #if DEBUG
2304 int current_value = base::Relaxed_Load(slots);
2305 DCHECK(current_value == 0 ||
2306 current_value == result.feedback_vector_slots);
2307 #endif
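      // The DCHECK above documents that concurrent compilations of the same
      // function produce the same slot count; the store below uses relaxed
      // ordering.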
2308 base::Relaxed_Store(slots, result.feedback_vector_slots);
2309 }
2310 }
2311 base::Vector<byte> code_space;
2312 NativeModule::JumpTablesRef jump_tables;
2313 CodeSpaceWriteScope code_space_write_scope(this);
2314 {
2315 base::RecursiveMutexGuard guard{&allocation_mutex_};
2316 code_space = code_allocator_.AllocateForCode(this, total_code_space);
2317 // Lookup the jump tables to use once, then use for all code objects.
2318 jump_tables =
2319 FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
2320 }
2321 // If we happen to have a {total_code_space} which is bigger than
2322 // {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
2323 // region. If this ever happens, we need to handle this case (by splitting the
2324 // {results} vector into smaller chunks).
2325 CHECK(jump_tables.is_valid());
2326
2327 std::vector<std::unique_ptr<WasmCode>> generated_code;
2328 generated_code.reserve(results.size());
2329
2330 // Now copy the generated code into the code space and relocate it.
2331 for (auto& result : results) {
2332 DCHECK_EQ(result.code_desc.buffer, result.instr_buffer->start());
2333 size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
2334 base::Vector<byte> this_code_space = code_space.SubVector(0, code_size);
2335 code_space += code_size;
2336 generated_code.emplace_back(AddCodeWithCodeSpace(
2337 result.func_index, result.code_desc, result.frame_slot_count,
2338 result.tagged_parameter_slots,
2339 result.protected_instructions_data.as_vector(),
2340 result.source_positions.as_vector(), GetCodeKind(result),
2341 result.result_tier, result.for_debugging, this_code_space,
2342 jump_tables));
2343 }
2344 DCHECK_EQ(0, code_space.size());
2345
2346 return generated_code;
2347 }
2348
2349 void NativeModule::SetTieringState(TieringState new_tiering_state) {
2350 // Do not tier down asm.js (just never change the tiering state).
2351 if (module()->origin != kWasmOrigin) return;
2352
2353 base::RecursiveMutexGuard lock(&allocation_mutex_);
2354 tiering_state_ = new_tiering_state;
2355 }
2356
2357 bool NativeModule::IsTieredDown() {
2358 base::RecursiveMutexGuard lock(&allocation_mutex_);
2359 return tiering_state_ == kTieredDown;
2360 }
2361
2362 void NativeModule::RecompileForTiering() {
2363 // If baseline compilation is not finished yet, we do not tier down now. This
2364 // would be tricky because not all code is guaranteed to be available yet.
2365 // Instead, we tier down after streaming compilation has finished.
2366 if (!compilation_state_->baseline_compilation_finished()) return;
2367
2368 // Read the tiering state under the lock, then trigger recompilation after
2369 // releasing the lock. If the tiering state was changed when the triggered
2370 // compilation units finish, code installation will handle that correctly.
2371 TieringState current_state;
2372 {
2373 base::RecursiveMutexGuard lock(&allocation_mutex_);
2374 current_state = tiering_state_;
2375
2376 // Initialize {cached_code_} to signal that this cache should get filled
2377 // from now on.
2378 if (!cached_code_) {
2379 cached_code_ = std::make_unique<
2380 std::map<std::pair<ExecutionTier, int>, WasmCode*>>();
2381 // Fill with existing code.
2382 for (auto& code_entry : owned_code_) {
2383 InsertToCodeCache(code_entry.second.get());
2384 }
2385 }
2386 }
2387 RecompileNativeModule(this, current_state);
2388 }
2389
2390 std::vector<int> NativeModule::FindFunctionsToRecompile(
2391 TieringState new_tiering_state) {
2392 WasmCodeRefScope code_ref_scope;
2393 base::RecursiveMutexGuard guard(&allocation_mutex_);
2394 // Get writable permission already here (and not inside the loop in
2395 // {PatchJumpTablesLocked}), to avoid switching for each slot individually.
2396 CodeSpaceWriteScope code_space_write_scope(this);
2397 std::vector<int> function_indexes;
2398 int imported = module()->num_imported_functions;
2399 int declared = module()->num_declared_functions;
2400 const bool tier_down = new_tiering_state == kTieredDown;
2401 for (int slot_index = 0; slot_index < declared; ++slot_index) {
2402 int function_index = imported + slot_index;
2403 WasmCode* old_code = code_table_[slot_index];
2404 bool code_is_good =
2405 tier_down ? old_code && old_code->for_debugging()
2406 : old_code && old_code->tier() == ExecutionTier::kTurbofan;
2407 if (code_is_good) continue;
2408 DCHECK_NOT_NULL(cached_code_);
2409 auto cache_it = cached_code_->find(std::make_pair(
2410 tier_down ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan,
2411 function_index));
2412 if (cache_it != cached_code_->end()) {
2413 WasmCode* cached_code = cache_it->second;
2414 if (old_code) {
2415 WasmCodeRefScope::AddRef(old_code);
2416 // The code is added to the current {WasmCodeRefScope}, hence the ref
2417 // count cannot drop to zero here.
2418 old_code->DecRefOnLiveCode();
2419 }
2420 code_table_[slot_index] = cached_code;
2421 PatchJumpTablesLocked(slot_index, cached_code->instruction_start());
2422 cached_code->IncRef();
2423 continue;
2424 }
2425 // Otherwise add the function to the set of functions to recompile.
2426 function_indexes.push_back(function_index);
2427 }
2428 return function_indexes;
2429 }
2430
2431 void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
2432 base::RecursiveMutexGuard guard(&allocation_mutex_);
2433 // Free the code space.
2434 code_allocator_.FreeCode(codes);
2435
2436 if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
2437 DebugInfo* debug_info = debug_info_.get();
2438 // Free the {WasmCode} objects. This will also unregister trap handler data.
2439 for (WasmCode* code : codes) {
2440 DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
2441 owned_code_.erase(code->instruction_start());
2442 }
2443 // Remove debug side tables for all removed code objects, after releasing our
2444 // lock. This is to avoid lock order inversion.
2445 if (debug_info) debug_info->RemoveDebugSideTables(codes);
2446 }
2447
2448 size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
2449 base::RecursiveMutexGuard guard{&allocation_mutex_};
2450 return code_allocator_.GetNumCodeSpaces();
2451 }
2452
2453 bool NativeModule::HasDebugInfo() const {
2454 base::RecursiveMutexGuard guard(&allocation_mutex_);
2455 return debug_info_ != nullptr;
2456 }
2457
2458 DebugInfo* NativeModule::GetDebugInfo() {
2459 base::RecursiveMutexGuard guard(&allocation_mutex_);
2460 if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
2461 return debug_info_.get();
2462 }
2463
2464 void WasmCodeManager::FreeNativeModule(
2465 base::Vector<VirtualMemory> owned_code_space, size_t committed_size) {
2466 base::MutexGuard lock(&native_modules_mutex_);
2467 for (auto& code_space : owned_code_space) {
2468 DCHECK(code_space.IsReserved());
2469 TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
2470 code_space.address(), code_space.end(), code_space.size());
2471
2472 #if defined(V8_OS_WIN64)
2473 if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
2474 win64_unwindinfo::UnregisterNonABICompliantCodeRange(
2475 reinterpret_cast<void*>(code_space.address()));
2476 }
2477 #endif // V8_OS_WIN64
2478
2479 lookup_map_.erase(code_space.address());
2480 code_space.Free();
2481 DCHECK(!code_space.IsReserved());
2482 }
2483
2484 DCHECK(IsAligned(committed_size, CommitPageSize()));
2485 // TODO(v8:8462): Remove this once perf supports remapping.
2486 if (!FLAG_perf_prof) {
2487 size_t old_committed =
2488 total_committed_code_space_.fetch_sub(committed_size);
2489 DCHECK_LE(committed_size, old_committed);
2490 USE(old_committed);
2491 }
2492 }
2493
2494 NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
2495 base::MutexGuard lock(&native_modules_mutex_);
2496 if (lookup_map_.empty()) return nullptr;
2497
2498 auto iter = lookup_map_.upper_bound(pc);
2499 if (iter == lookup_map_.begin()) return nullptr;
2500 --iter;
2501 Address region_start = iter->first;
2502 Address region_end = iter->second.first;
2503 NativeModule* candidate = iter->second.second;
2504
2505 DCHECK_NOT_NULL(candidate);
2506 return region_start <= pc && pc < region_end ? candidate : nullptr;
2507 }
2508
2509 WasmCode* WasmCodeManager::LookupCode(Address pc) const {
2510 NativeModule* candidate = LookupNativeModule(pc);
2511 return candidate ? candidate->Lookup(pc) : nullptr;
2512 }
2513
2514 namespace {
2515 thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
2516 } // namespace
2517
2518 WasmCodeRefScope::WasmCodeRefScope()
2519 : previous_scope_(current_code_refs_scope) {
2520 current_code_refs_scope = this;
2521 }
2522
2523 WasmCodeRefScope::~WasmCodeRefScope() {
2524 DCHECK_EQ(this, current_code_refs_scope);
2525 current_code_refs_scope = previous_scope_;
2526 WasmCode::DecrementRefCount(base::VectorOf(code_ptrs_));
2527 }
2528
2529 // static
2530 void WasmCodeRefScope::AddRef(WasmCode* code) {
2531 DCHECK_NOT_NULL(code);
2532 WasmCodeRefScope* current_scope = current_code_refs_scope;
2533 DCHECK_NOT_NULL(current_scope);
2534 current_scope->code_ptrs_.push_back(code);
2535 code->IncRef();
2536 }
2537
2538 Builtin RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId stub_id) {
2539 #define RUNTIME_STUB_NAME(Name) Builtin::k##Name,
2540 #define RUNTIME_STUB_NAME_TRAP(Name) Builtin::kThrowWasm##Name,
2541 constexpr Builtin builtin_names[] = {
2542 WASM_RUNTIME_STUB_LIST(RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP)};
2543 #undef RUNTIME_STUB_NAME
2544 #undef RUNTIME_STUB_NAME_TRAP
2545 STATIC_ASSERT(arraysize(builtin_names) == WasmCode::kRuntimeStubCount);
2546
2547 DCHECK_GT(arraysize(builtin_names), stub_id);
2548 return builtin_names[stub_id];
2549 }
2550
2551 const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
2552 #define RUNTIME_STUB_NAME(Name) #Name,
2553 #define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
2554 constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
2555 RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
2556 #undef RUNTIME_STUB_NAME
2557 #undef RUNTIME_STUB_NAME_TRAP
2558 STATIC_ASSERT(arraysize(runtime_stub_names) ==
2559 WasmCode::kRuntimeStubCount + 1);
2560
2561 DCHECK_GT(arraysize(runtime_stub_names), stub_id);
2562 return runtime_stub_names[stub_id];
2563 }
2564
2565 } // namespace wasm
2566 } // namespace internal
2567 } // namespace v8
2568 #undef TRACE_HEAP
2569