// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <memory>

#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/base/atomic-utils.h"
#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"

#include "src/asmjs/asm-wasm-builder.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"

using namespace v8::internal;
using namespace v8::internal::wasm;
namespace base = v8::base;

#define TRACE(...)                                      \
  do {                                                  \
    if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
  } while (false)

#define TRACE_CHAIN(instance)        \
  do {                               \
    instance->PrintInstancesChain(); \
  } while (false)

namespace {

static const int kInvalidSigIndex = -1;

byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
  return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) +
         offset;
}

// Weak-callback finalizer for wasm memory buffers with an external
// (guard-region) backing store: frees the whole reserved region and updates
// the isolate's external memory accounting.
static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
  DisallowHeapAllocation no_gc;
  JSArrayBuffer** p = reinterpret_cast<JSArrayBuffer**>(data.GetParameter());
  JSArrayBuffer* buffer = *p;

  if (!buffer->was_neutered()) {
    void* memory = buffer->backing_store();
    DCHECK(memory != nullptr);
    base::OS::Free(memory,
                   RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));

    data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
        -buffer->byte_length()->Number());
  }

  GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
}

#if V8_TARGET_ARCH_64_BIT
const bool kGuardRegionsSupported = true;
#else
const bool kGuardRegionsSupported = false;
#endif

bool EnableGuardRegions() {
  return FLAG_wasm_guard_pages && kGuardRegionsSupported;
}

static void RecordStats(Isolate* isolate, Code* code) {
  isolate->counters()->wasm_generated_code_size()->Increment(
      code->body_size());
  isolate->counters()->wasm_reloc_size()->Increment(
      code->relocation_info()->length());
}

static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
  DisallowHeapAllocation no_gc;
  for (int i = 0; i < functions->length(); ++i) {
    RecordStats(isolate, Code::cast(functions->get(i)));
  }
}

// Allocates the backing store for a wasm memory. With guard regions enabled,
// reserves the maximal addressable range and makes only the first |size|
// bytes accessible; otherwise falls back to the isolate's ArrayBuffer
// allocator.
void* TryAllocateBackingStore(Isolate* isolate, size_t size,
                              bool enable_guard_regions, bool& is_external) {
  is_external = false;
  // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
  // systems. It may be safer to fail instead, given that other code might do
  // things that would be unsafe if they expected guard pages where there
  // weren't any.
  if (enable_guard_regions && kGuardRegionsSupported) {
    // TODO(eholk): On Windows we want to make sure we don't commit the guard
    // pages yet.

    // We always allocate the largest possible offset into the heap, so the
    // addressable memory after the guard page can be made inaccessible.
    const size_t alloc_size =
        RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
    DCHECK_EQ(0, size % base::OS::CommitPageSize());

    // AllocateGuarded makes the whole region inaccessible by default.
    void* memory = base::OS::AllocateGuarded(alloc_size);
    if (memory == nullptr) {
      return nullptr;
    }

    // Make the part we care about accessible.
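    // Resulting layout (sketch): [0, size) is readable/writable and backs
    // the wasm heap, while [size, alloc_size) stays protected, so an
    // out-of-bounds access traps instead of touching unrelated memory.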
    base::OS::Unprotect(memory, size);

    reinterpret_cast<v8::Isolate*>(isolate)
        ->AdjustAmountOfExternalAllocatedMemory(size);

    is_external = true;
    return memory;
  } else {
    void* memory = isolate->array_buffer_allocator()->Allocate(size);
    return memory;
  }
}

// Flushes the instruction cache for every code object in |code_table|.
void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
  for (int i = 0; i < code_table->length(); ++i) {
    Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
    Assembler::FlushICache(isolate, code->instruction_start(),
                           code->instruction_size());
  }
}

Handle