// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/isolate.h"

#include <stdlib.h>

#include <atomic>
#include <fstream>  // NOLINT(readability/streams)
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>

#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise.h"
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/flush-instruction-cache.h"
#include "src/common/assert-scope.h"
#include "src/common/ptr-compr.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/date/date.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/basic-block-profiler.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/messages.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/protectors-inl.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/stub-cache.h"
#include "src/init/bootstrapper.h"
#include "src/init/setup-isolate.h"
#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
#include "src/libsampler/sampler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/logging/metrics.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/backing-store.h"
#include "src/objects/elements.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/prototype.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/visitors.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/embedded/embedded-file-writer.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/strings/string-builder-inl.h"
#include "src/strings/string-stream.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/address-map.h"
#include "src/utils/ostreams.h"
#include "src/utils/version.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/type-stats.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uobject.h"
#endif  // V8_INTL_SUPPORT

#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
#endif  // V8_OS_WIN64

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
#include "src/heap/conservative-stack-visitor.h"
#endif

extern "C" const uint8_t* v8_Default_embedded_blob_code_;
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
extern "C" const uint8_t* v8_Default_embedded_blob_data_;
extern "C" uint32_t v8_Default_embedded_blob_data_size_;

namespace v8 {
namespace internal {
#ifdef DEBUG
#define TRACE_ISOLATE(tag)                                                  \
  do {                                                                      \
    if (FLAG_trace_isolates) {                                              \
      PrintF("Isolate %p (id %d)" #tag "\n", reinterpret_cast<void*>(this), \
             id());                                                         \
    }                                                                       \
  } while (false)
#else
#define TRACE_ISOLATE(tag)
#endif

const uint8_t* DefaultEmbeddedBlobCode() {
  return v8_Default_embedded_blob_code_;
}
uint32_t DefaultEmbeddedBlobCodeSize() {
  return v8_Default_embedded_blob_code_size_;
}
const uint8_t* DefaultEmbeddedBlobData() {
  return v8_Default_embedded_blob_data_;
}
uint32_t DefaultEmbeddedBlobDataSize() {
  return v8_Default_embedded_blob_data_size_;
}

#ifdef V8_MULTI_SNAPSHOTS
extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;

const uint8_t* TrustedEmbeddedBlobCode() {
  return v8_Trusted_embedded_blob_code_;
}
uint32_t TrustedEmbeddedBlobCodeSize() {
  return v8_Trusted_embedded_blob_code_size_;
}
const uint8_t* TrustedEmbeddedBlobData() {
  return v8_Trusted_embedded_blob_data_;
}
uint32_t TrustedEmbeddedBlobDataSize() {
  return v8_Trusted_embedded_blob_data_size_;
}
#endif

namespace {
// These variables provide access to the current embedded blob without
// requiring an isolate instance. This is needed e.g. by Code::InstructionStart,
// which may not have access to an isolate but still needs to access the
// embedded blob. The variables are initialized by each isolate in Init().
// Writes and reads are relaxed since we can guarantee that the current thread
// has initialized these variables before accessing them. Different threads may
// race, but this is fine since they all attempt to set the same values of the
// blob pointer and size.

std::atomic<const uint8_t*> current_embedded_blob_code_(nullptr);
std::atomic<uint32_t> current_embedded_blob_code_size_(0);
std::atomic<const uint8_t*> current_embedded_blob_data_(nullptr);
std::atomic<uint32_t> current_embedded_blob_data_size_(0);

// The various workflows around embedded snapshots are fairly complex. We need
// to support plain old snapshot builds, nosnap builds, and the requirements of
// subtly different serialization tests. There are two related knobs to
// twiddle:
//
// - The default embedded blob may be overridden by setting the sticky embedded
//   blob. This is set automatically whenever we create a new embedded blob.
//
// - Lifecycle management can be either manual or set to refcounting.
//
// A few situations to demonstrate their use:
//
// - A plain old snapshot build neither overrides the default blob nor
//   refcounts.
//
// - mksnapshot sets the sticky blob and manually frees the embedded
//   blob once done.
//
// - Most serializer tests do the same.
//
// - Nosnapshot builds set the sticky blob and enable refcounting.
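//
// A minimal sketch of the manual (mksnapshot-style) lifecycle described above,
// using the helpers defined below. This is illustrative only, not the exact
// sequence any particular tool uses:
//
//   DisableEmbeddedBlobRefcounting();
//   // ... create an isolate and build/serialize the new embedded blob,
//   //     which also installs it as the sticky blob ...
//   FreeCurrentEmbeddedBlob();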

// This mutex protects access to the following variables:
// - sticky_embedded_blob_code_
// - sticky_embedded_blob_code_size_
// - sticky_embedded_blob_data_
// - sticky_embedded_blob_data_size_
// - enable_embedded_blob_refcounting_
// - current_embedded_blob_refs_
base::LazyMutex current_embedded_blob_refcount_mutex_ = LAZY_MUTEX_INITIALIZER;

const uint8_t* sticky_embedded_blob_code_ = nullptr;
uint32_t sticky_embedded_blob_code_size_ = 0;
const uint8_t* sticky_embedded_blob_data_ = nullptr;
uint32_t sticky_embedded_blob_data_size_ = 0;

bool enable_embedded_blob_refcounting_ = true;
int current_embedded_blob_refs_ = 0;

const uint8_t* StickyEmbeddedBlobCode() { return sticky_embedded_blob_code_; }
uint32_t StickyEmbeddedBlobCodeSize() {
  return sticky_embedded_blob_code_size_;
}
const uint8_t* StickyEmbeddedBlobData() { return sticky_embedded_blob_data_; }
uint32_t StickyEmbeddedBlobDataSize() {
  return sticky_embedded_blob_data_size_;
}

void SetStickyEmbeddedBlob(const uint8_t* code, uint32_t code_size,
                           const uint8_t* data, uint32_t data_size) {
  sticky_embedded_blob_code_ = code;
  sticky_embedded_blob_code_size_ = code_size;
  sticky_embedded_blob_data_ = data;
  sticky_embedded_blob_data_size_ = data_size;
}

}  // namespace

void DisableEmbeddedBlobRefcounting() {
  base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
  enable_embedded_blob_refcounting_ = false;
}

void FreeCurrentEmbeddedBlob() {
  CHECK(!enable_embedded_blob_refcounting_);
  base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());

  if (StickyEmbeddedBlobCode() == nullptr) return;

  CHECK_EQ(StickyEmbeddedBlobCode(), Isolate::CurrentEmbeddedBlobCode());
  CHECK_EQ(StickyEmbeddedBlobData(), Isolate::CurrentEmbeddedBlobData());

  InstructionStream::FreeOffHeapInstructionStream(
      const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
      Isolate::CurrentEmbeddedBlobCodeSize(),
      const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobData()),
      Isolate::CurrentEmbeddedBlobDataSize());

  current_embedded_blob_code_.store(nullptr, std::memory_order_relaxed);
  current_embedded_blob_code_size_.store(0, std::memory_order_relaxed);
  current_embedded_blob_data_.store(nullptr, std::memory_order_relaxed);
  current_embedded_blob_data_size_.store(0, std::memory_order_relaxed);
  sticky_embedded_blob_code_ = nullptr;
  sticky_embedded_blob_code_size_ = 0;
  sticky_embedded_blob_data_ = nullptr;
  sticky_embedded_blob_data_size_ = 0;
}

// static
bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
  // In some situations, we must be able to rely on the embedded blob being
  // immortal immovable. This is the case if the blob is binary-embedded.
  // See blob lifecycle controls above for descriptions of when the current
  // embedded blob may change (e.g. in tests or mksnapshot). If the blob is
  // binary-embedded, it is immortal immovable.
  const uint8_t* code =
      current_embedded_blob_code_.load(std::memory_order::memory_order_relaxed);
  if (code == nullptr) return false;
#ifdef V8_MULTI_SNAPSHOTS
  if (code == TrustedEmbeddedBlobCode()) return true;
#endif
  return code == DefaultEmbeddedBlobCode();
}

void Isolate::SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
                              const uint8_t* data, uint32_t data_size) {
  CHECK_NOT_NULL(code);
  CHECK_NOT_NULL(data);

  embedded_blob_code_ = code;
  embedded_blob_code_size_ = code_size;
  embedded_blob_data_ = data;
  embedded_blob_data_size_ = data_size;
  current_embedded_blob_code_.store(code, std::memory_order_relaxed);
  current_embedded_blob_code_size_.store(code_size, std::memory_order_relaxed);
  current_embedded_blob_data_.store(data, std::memory_order_relaxed);
  current_embedded_blob_data_size_.store(data_size, std::memory_order_relaxed);

#ifdef DEBUG
  // Verify that the contents of the embedded blob are unchanged from
  // serialization-time, just to ensure the compiler isn't messing with us.
  EmbeddedData d = EmbeddedData::FromBlob();
  if (d.EmbeddedBlobDataHash() != d.CreateEmbeddedBlobDataHash()) {
    FATAL(
        "Embedded blob data section checksum verification failed. This "
        "indicates that the embedded blob has been modified since compilation "
        "time.");
  }
  if (FLAG_text_is_readable) {
    if (d.EmbeddedBlobCodeHash() != d.CreateEmbeddedBlobCodeHash()) {
      FATAL(
          "Embedded blob code section checksum verification failed. This "
          "indicates that the embedded blob has been modified since "
          "compilation time. A common cause is a debugging breakpoint set "
          "within builtin code.");
    }
  }
#endif  // DEBUG

  if (FLAG_experimental_flush_embedded_blob_icache) {
    FlushInstructionCache(const_cast<uint8_t*>(code), code_size);
  }
}

void Isolate::ClearEmbeddedBlob() {
  CHECK(enable_embedded_blob_refcounting_);
  CHECK_EQ(embedded_blob_code_, CurrentEmbeddedBlobCode());
  CHECK_EQ(embedded_blob_code_, StickyEmbeddedBlobCode());
  CHECK_EQ(embedded_blob_data_, CurrentEmbeddedBlobData());
  CHECK_EQ(embedded_blob_data_, StickyEmbeddedBlobData());

  embedded_blob_code_ = nullptr;
  embedded_blob_code_size_ = 0;
  embedded_blob_data_ = nullptr;
  embedded_blob_data_size_ = 0;
  current_embedded_blob_code_.store(nullptr, std::memory_order_relaxed);
  current_embedded_blob_code_size_.store(0, std::memory_order_relaxed);
  current_embedded_blob_data_.store(nullptr, std::memory_order_relaxed);
  current_embedded_blob_data_size_.store(0, std::memory_order_relaxed);
  sticky_embedded_blob_code_ = nullptr;
  sticky_embedded_blob_code_size_ = 0;
  sticky_embedded_blob_data_ = nullptr;
  sticky_embedded_blob_data_size_ = 0;
}

const uint8_t* Isolate::embedded_blob_code() const {
  return embedded_blob_code_;
}
uint32_t Isolate::embedded_blob_code_size() const {
  return embedded_blob_code_size_;
}
const uint8_t* Isolate::embedded_blob_data() const {
  return embedded_blob_data_;
}
uint32_t Isolate::embedded_blob_data_size() const {
  return embedded_blob_data_size_;
}

// static
const uint8_t* Isolate::CurrentEmbeddedBlobCode() {
  return current_embedded_blob_code_.load(
      std::memory_order::memory_order_relaxed);
}

// static
uint32_t Isolate::CurrentEmbeddedBlobCodeSize() {
  return current_embedded_blob_code_size_.load(
      std::memory_order::memory_order_relaxed);
}

// static
const uint8_t* Isolate::CurrentEmbeddedBlobData() {
  return current_embedded_blob_data_.load(
      std::memory_order::memory_order_relaxed);
}

// static
uint32_t Isolate::CurrentEmbeddedBlobDataSize() {
  return current_embedded_blob_data_size_.load(
      std::memory_order::memory_order_relaxed);
}

size_t Isolate::HashIsolateForEmbeddedBlob() {
  DCHECK(builtins_.is_initialized());
  DCHECK(Builtins::AllBuiltinsAreIsolateIndependent());

  DisallowHeapAllocation no_gc;

  static constexpr size_t kSeed = 0;
  size_t hash = kSeed;

  // Hash data sections of builtin code objects.
  for (int i = 0; i < Builtins::builtin_count; i++) {
    Code code = heap_.builtin(i);

    DCHECK(Internals::HasHeapObjectTag(code.ptr()));
    uint8_t* const code_ptr =
        reinterpret_cast<uint8_t*>(code.ptr() - kHeapObjectTag);

    // These static asserts ensure we don't miss relevant fields. We don't hash
    // instruction/metadata size and flags since they change when creating the
    // off-heap trampolines. Other data fields must remain the same.
    STATIC_ASSERT(Code::kInstructionSizeOffset == Code::kDataStart);
    STATIC_ASSERT(Code::kMetadataSizeOffset ==
                  Code::kInstructionSizeOffsetEnd + 1);
    STATIC_ASSERT(Code::kFlagsOffset == Code::kMetadataSizeOffsetEnd + 1);
    STATIC_ASSERT(Code::kBuiltinIndexOffset == Code::kFlagsOffsetEnd + 1);
    static constexpr int kStartOffset = Code::kBuiltinIndexOffset;

    for (int j = kStartOffset; j < Code::kUnalignedHeaderSize; j++) {
      hash = base::hash_combine(hash, size_t{code_ptr[j]});
    }
  }

  // The builtins constants table is also tightly tied to embedded builtins.
  hash = base::hash_combine(
      hash, static_cast<size_t>(heap_.builtins_constants_table().length()));

  return hash;
}

base::Thread::LocalStorageKey Isolate::isolate_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
#if DEBUG
std::atomic<bool> Isolate::isolate_key_created_{false};
#endif

namespace {
// A global counter for all generated Isolates; might overflow.
std::atomic<int> isolate_counter{0};
}  // namespace

Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
  ThreadId thread_id = ThreadId::Current();
  PerIsolateThreadData* per_thread = nullptr;
  {
    base::MutexGuard lock_guard(&thread_data_table_mutex_);
    per_thread = thread_data_table_.Lookup(thread_id);
    if (per_thread == nullptr) {
      if (FLAG_adjust_os_scheduling_parameters) {
        base::OS::AdjustSchedulingParams();
      }
      per_thread = new PerIsolateThreadData(this, thread_id);
      thread_data_table_.Insert(per_thread);
    }
    DCHECK(thread_data_table_.Lookup(thread_id) == per_thread);
  }
  return per_thread;
}

void Isolate::DiscardPerThreadDataForThisThread() {
  ThreadId thread_id = ThreadId::TryGetCurrent();
  if (thread_id.IsValid()) {
    DCHECK_NE(thread_manager_->mutex_owner_.load(std::memory_order_relaxed),
              thread_id);
    base::MutexGuard lock_guard(&thread_data_table_mutex_);
    PerIsolateThreadData* per_thread = thread_data_table_.Lookup(thread_id);
    if (per_thread) {
      DCHECK(!per_thread->thread_state_);
      thread_data_table_.Remove(per_thread);
    }
  }
}

Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
  ThreadId thread_id = ThreadId::Current();
  return FindPerThreadDataForThread(thread_id);
}

Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
    ThreadId thread_id) {
  PerIsolateThreadData* per_thread = nullptr;
  {
    base::MutexGuard lock_guard(&thread_data_table_mutex_);
    per_thread = thread_data_table_.Lookup(thread_id);
  }
  return per_thread;
}

void Isolate::InitializeOncePerProcess() {
  isolate_key_ = base::Thread::CreateThreadLocalKey();
#if DEBUG
  bool expected = false;
  DCHECK_EQ(true, isolate_key_created_.compare_exchange_strong(
                      expected, true, std::memory_order_relaxed));
#endif
  per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
}

Address Isolate::get_address_from_id(IsolateAddressId id) {
  return isolate_addresses_[id];
}

char* Isolate::Iterate(RootVisitor* v, char* thread_storage) {
  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
  Iterate(v, thread);
  return thread_storage + sizeof(ThreadLocalTop);
}

void Isolate::IterateThread(ThreadVisitor* v, char* t) {
  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
  v->VisitThread(this, thread);
}

void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
  // Visit the roots from the top for a given thread.
  v->VisitRootPointer(Root::kTop, nullptr,
                      FullObjectSlot(&thread->pending_exception_));
  v->VisitRootPointer(Root::kTop, nullptr,
                      FullObjectSlot(&thread->pending_message_obj_));
  v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&thread->context_));
  v->VisitRootPointer(Root::kTop, nullptr,
                      FullObjectSlot(&thread->scheduled_exception_));

  for (v8::TryCatch* block = thread->try_catch_handler_; block != nullptr;
       block = block->next_) {
    // TODO(3770): Make TryCatch::exception_ an Address (and message_obj_ too).
    v->VisitRootPointer(
        Root::kTop, nullptr,
        FullObjectSlot(reinterpret_cast<Address>(&(block->exception_))));
    v->VisitRootPointer(
        Root::kTop, nullptr,
        FullObjectSlot(reinterpret_cast<Address>(&(block->message_obj_))));
  }

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  ConservativeStackVisitor stack_visitor(this, v);
  thread_local_top()->stack_.IteratePointers(&stack_visitor);
#endif

  // Iterate over pointers on native execution stack.
  wasm::WasmCodeRefScope wasm_code_ref_scope;
  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
    it.frame()->Iterate(v);
  }
}

void Isolate::Iterate(RootVisitor* v) {
  ThreadLocalTop* current_t = thread_local_top();
  Iterate(v, current_t);
}

void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
  thread_local_top()->try_catch_handler_ = that;
}

void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
  DCHECK(thread_local_top()->try_catch_handler_ == that);
  thread_local_top()->try_catch_handler_ = that->next_;
}

Handle<String> Isolate::StackTraceString() {
  if (stack_trace_nesting_level_ == 0) {
    stack_trace_nesting_level_++;
    HeapStringAllocator allocator;
    StringStream::ClearMentionedObjectCache(this);
    StringStream accumulator(&allocator);
    incomplete_message_ = &accumulator;
    PrintStack(&accumulator);
    Handle<String> stack_trace = accumulator.ToString(this);
    incomplete_message_ = nullptr;
    stack_trace_nesting_level_ = 0;
    return stack_trace;
  } else if (stack_trace_nesting_level_ == 1) {
    stack_trace_nesting_level_++;
    base::OS::PrintError(
        "\n\nAttempt to print stack while printing stack (double fault)\n");
    base::OS::PrintError(
        "If you are lucky you may find a partial stack dump on stdout.\n\n");
    incomplete_message_->OutputToStdOut();
    return factory()->empty_string();
  } else {
    base::OS::Abort();
    // Unreachable
    return factory()->empty_string();
  }
}

void Isolate::PushStackTraceAndDie(void* ptr1, void* ptr2, void* ptr3,
                                   void* ptr4) {
  StackTraceFailureMessage message(this, ptr1, ptr2, ptr3, ptr4);
  message.Print();
  base::OS::Abort();
}

void StackTraceFailureMessage::Print() volatile {
  // Print the details of this failure message object, including its own
  // address to force stack allocation.
  base::OS::PrintError(
      "Stacktrace:\n ptr1=%p\n ptr2=%p\n ptr3=%p\n ptr4=%p\n "
      "failure_message_object=%p\n%s",
      ptr1_, ptr2_, ptr3_, ptr4_, this, &js_stack_trace_[0]);
}

StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
                                                   void* ptr2, void* ptr3,
                                                   void* ptr4) {
  isolate_ = isolate;
  ptr1_ = ptr1;
  ptr2_ = ptr2;
  ptr3_ = ptr3;
  ptr4_ = ptr4;
  // Write a stacktrace into the {js_stack_trace_} buffer.
  const size_t buffer_length = arraysize(js_stack_trace_);
  memset(&js_stack_trace_, 0, buffer_length);
  FixedStringAllocator fixed(&js_stack_trace_[0], buffer_length - 1);
  StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
  isolate->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
  // Keep a reference to the last code objects to increase the likelihood that
  // they get included in the minidump.
  const size_t code_objects_length = arraysize(code_objects_);
  size_t i = 0;
  StackFrameIterator it(isolate);
  for (; !it.done() && i < code_objects_length; it.Advance()) {
    code_objects_[i++] =
        reinterpret_cast<void*>(it.frame()->unchecked_code().ptr());
  }
}

class FrameArrayBuilder {
 public:
  enum FrameFilterMode { ALL, CURRENT_SECURITY_CONTEXT };

  FrameArrayBuilder(Isolate* isolate, FrameSkipMode mode, int limit,
                    Handle<Object> caller, FrameFilterMode filter_mode)
      : isolate_(isolate),
        mode_(mode),
        limit_(limit),
        caller_(caller),
        check_security_context_(filter_mode == CURRENT_SECURITY_CONTEXT) {
    switch (mode_) {
      case SKIP_FIRST:
        skip_next_frame_ = true;
        break;
      case SKIP_UNTIL_SEEN:
        DCHECK(caller_->IsJSFunction());
        skip_next_frame_ = true;
        break;
      case SKIP_NONE:
        skip_next_frame_ = false;
        break;
    }

    elements_ = isolate->factory()->NewFrameArray(Min(limit, 10));
  }

  void AppendAsyncFrame(Handle<JSGeneratorObject> generator_object) {
    if (full()) return;
    Handle<JSFunction> function(generator_object->function(), isolate_);
    if (!IsVisibleInStackTrace(function)) return;
    int flags = FrameArray::kIsAsync;
    if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;

    Handle<Object> receiver(generator_object->receiver(), isolate_);
    Handle<AbstractCode> code(
        AbstractCode::cast(function->shared().GetBytecodeArray()), isolate_);
    int offset = Smi::ToInt(generator_object->input_or_debug_pos());
    // The stored bytecode offset is relative to a different base than what
    // is used in the source position table, hence the subtraction.
    offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
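    // (For illustration: given the subtraction above, a stored value equal to
    // BytecodeArray::kHeaderSize - kHeapObjectTag corresponds to offset 0,
    // i.e. the first bytecode.)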

    Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
    if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
      int param_count = function->shared().internal_formal_parameter_count();
      parameters = isolate_->factory()->NewFixedArray(param_count);
      for (int i = 0; i < param_count; i++) {
        parameters->set(i, generator_object->parameters_and_registers().get(i));
      }
    }

    elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code,
                                          offset, flags, parameters);
  }

  void AppendPromiseCombinatorFrame(Handle<JSFunction> element_function,
                                    Handle<JSFunction> combinator,
                                    FrameArray::Flag combinator_flag,
                                    Handle<Context> context) {
    if (full()) return;
    int flags = FrameArray::kIsAsync | combinator_flag;

    Handle<Context> native_context(context->native_context(), isolate_);
    if (!IsVisibleInStackTrace(combinator)) return;

    Handle<Object> receiver(native_context->promise_function(), isolate_);
    Handle<AbstractCode> code(AbstractCode::cast(combinator->code()), isolate_);

    // TODO(mmarchini) save Promises list from the Promise combinator
    Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();

    // We store the offset of the promise into the element function's
    // hash field for element callbacks.
    int const offset =
        Smi::ToInt(Smi::cast(element_function->GetIdentityHash())) - 1;

    elements_ = FrameArray::AppendJSFrame(elements_, receiver, combinator, code,
                                          offset, flags, parameters);
  }

  void AppendJavaScriptFrame(
      FrameSummary::JavaScriptFrameSummary const& summary) {
    // Filter out internal frames that we do not want to show.
    if (!IsVisibleInStackTrace(summary.function())) return;

    Handle<AbstractCode> abstract_code = summary.abstract_code();
    const int offset = summary.code_offset();

    const bool is_constructor = summary.is_constructor();

    int flags = 0;
    Handle<JSFunction> function = summary.function();
    if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
    if (is_constructor) flags |= FrameArray::kIsConstructor;

    Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
    if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
      parameters = summary.parameters();
    }

    elements_ = FrameArray::AppendJSFrame(
        elements_, TheHoleToUndefined(isolate_, summary.receiver()), function,
        abstract_code, offset, flags, parameters);
  }

  void AppendWasmFrame(FrameSummary::WasmFrameSummary const& summary) {
    if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
    Handle<WasmInstanceObject> instance = summary.wasm_instance();
    int flags = 0;
    if (instance->module_object().is_asm_js()) {
      flags |= FrameArray::kIsAsmJsWasmFrame;
      if (summary.at_to_number_conversion()) {
        flags |= FrameArray::kAsmJsAtNumberConversion;
      }
    } else {
      flags |= FrameArray::kIsWasmFrame;
    }

    elements_ = FrameArray::AppendWasmFrame(
        elements_, instance, summary.function_index(), summary.code(),
        summary.code_offset(), flags);
  }

  void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
    Handle<JSFunction> function = handle(exit_frame->function(), isolate_);

    // Filter out internal frames that we do not want to show.
    if (!IsVisibleInStackTrace(function)) return;

    // TODO(szuend): Remove this check once the flag is enabled
    // by default.
    if (!FLAG_experimental_stack_trace_frames &&
        function->shared().IsApiFunction()) {
      return;
    }

    Handle<Object> receiver(exit_frame->receiver(), isolate_);
    Handle<Code> code(exit_frame->LookupCode(), isolate_);
    const int offset =
        static_cast<int>(exit_frame->pc() - code->InstructionStart());

    int flags = 0;
    if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
    if (exit_frame->IsConstructor()) flags |= FrameArray::kIsConstructor;

    Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
    if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
      int param_count = exit_frame->ComputeParametersCount();
      parameters = isolate_->factory()->NewFixedArray(param_count);
      for (int i = 0; i < param_count; i++) {
        parameters->set(i, exit_frame->GetParameter(i));
      }
    }

    elements_ = FrameArray::AppendJSFrame(elements_, receiver, function,
                                          Handle<AbstractCode>::cast(code),
                                          offset, flags, parameters);
  }

  bool full() { return elements_->FrameCount() >= limit_; }

  Handle<FrameArray> GetElements() {
    elements_->ShrinkToFit(isolate_);
    return elements_;
  }

  // Creates a StackTraceFrame object for each frame in the FrameArray.
  Handle<FixedArray> GetElementsAsStackTraceFrameArray() {
    elements_->ShrinkToFit(isolate_);
    const int frame_count = elements_->FrameCount();
    Handle<FixedArray> stack_trace =
        isolate_->factory()->NewFixedArray(frame_count);

    for (int i = 0; i < frame_count; ++i) {
      Handle<StackTraceFrame> frame =
          isolate_->factory()->NewStackTraceFrame(elements_, i);
      stack_trace->set(i, *frame);
    }
    return stack_trace;
  }

 private:
  // Poison stack frames below the first strict mode frame.
  // The stack trace API should not expose receivers and function
  // objects on frames deeper than the top-most one with a strict mode
  // function.
  bool IsStrictFrame(Handle<JSFunction> function) {
    if (!encountered_strict_function_) {
      encountered_strict_function_ =
          is_strict(function->shared().language_mode());
    }
    return encountered_strict_function_;
  }

  // Determines whether the given stack frame should be displayed in a stack
  // trace.
  bool IsVisibleInStackTrace(Handle<JSFunction> function) {
    return ShouldIncludeFrame(function) && IsNotHidden(function) &&
           IsInSameSecurityContext(function);
  }

  // This mechanism excludes a number of uninteresting frames from the stack
  // trace. This can be the first frame (which will be a builtin-exit frame
  // for the error constructor builtin) or every frame until encountering a
  // user-specified function.
  bool ShouldIncludeFrame(Handle<JSFunction> function) {
    switch (mode_) {
      case SKIP_NONE:
        return true;
      case SKIP_FIRST:
        if (!skip_next_frame_) return true;
        skip_next_frame_ = false;
        return false;
      case SKIP_UNTIL_SEEN:
        if (skip_next_frame_ && (*function == *caller_)) {
          skip_next_frame_ = false;
          return false;
        }
        return !skip_next_frame_;
    }
    UNREACHABLE();
  }

  bool IsNotHidden(Handle<JSFunction> function) {
    // Functions defined not in user scripts are not visible unless directly
    // exposed, in which case the native flag is set.
    // The --builtins-in-stack-traces command line flag allows including
    // internal call sites in the stack trace for debugging purposes.
    if (!FLAG_builtins_in_stack_traces &&
        !function->shared().IsUserJavaScript()) {
      return function->shared().native() || function->shared().IsApiFunction();
    }
    return true;
  }

  bool IsInSameSecurityContext(Handle<JSFunction> function) {
    if (!check_security_context_) return true;
    return isolate_->context().HasSameSecurityTokenAs(function->context());
  }

  // TODO(jgruber): Fix all cases in which frames give us a hole value (e.g.
  // the receiver in RegExp constructor frames).
  Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
    return (in->IsTheHole(isolate))
               ? Handle<Object>::cast(isolate->factory()->undefined_value())
               : in;
  }

  Isolate* isolate_;
  const FrameSkipMode mode_;
  int limit_;
  const Handle<Object> caller_;
  bool skip_next_frame_ = true;
  bool encountered_strict_function_ = false;
  const bool check_security_context_;
  Handle<FrameArray> elements_;
};

bool GetStackTraceLimit(Isolate* isolate, int* result) {
  Handle<JSObject> error = isolate->error_function();

  Handle<String> key = isolate->factory()->stackTraceLimit_string();
  Handle<Object> stack_trace_limit = JSReceiver::GetDataProperty(error, key);
  if (!stack_trace_limit->IsNumber()) return false;

  // Ensure that limit is not negative.
  *result = Max(FastD2IChecked(stack_trace_limit->Number()), 0);

  if (*result != FLAG_stack_trace_limit) {
    isolate->CountUsage(v8::Isolate::kErrorStackTraceLimit);
  }

  return true;
}

bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }

bool IsBuiltinFunction(Isolate* isolate, HeapObject object,
                       Builtins::Name builtin_index) {
  if (!object.IsJSFunction()) return false;
  JSFunction const function = JSFunction::cast(object);
  return function.code() == isolate->builtins()->builtin(builtin_index);
}

void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
                            FrameArrayBuilder* builder) {
  while (!builder->full()) {
    // Check that the {promise} is not settled.
    if (promise->status() != Promise::kPending) return;

    // Check that we have exactly one PromiseReaction on the {promise}.
    if (!promise->reactions().IsPromiseReaction()) return;
    Handle<PromiseReaction> reaction(
        PromiseReaction::cast(promise->reactions()), isolate);
    if (!reaction->next().IsSmi()) return;

    // Check if the {reaction} has one of the known async function or
    // async generator continuations as its fulfill handler.
    if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
                          Builtins::kAsyncFunctionAwaitResolveClosure) ||
        IsBuiltinFunction(isolate, reaction->fulfill_handler(),
                          Builtins::kAsyncGeneratorAwaitResolveClosure) ||
        IsBuiltinFunction(isolate, reaction->fulfill_handler(),
                          Builtins::kAsyncGeneratorYieldResolveClosure)) {
      // Now peek into the handlers' AwaitContext to get to
      // the JSGeneratorObject for the async function.
      Handle<Context> context(
          JSFunction::cast(reaction->fulfill_handler()).context(), isolate);
      Handle<JSGeneratorObject> generator_object(
          JSGeneratorObject::cast(context->extension()), isolate);
      CHECK(generator_object->is_suspended());

      // Append async frame corresponding to the {generator_object}.
      builder->AppendAsyncFrame(generator_object);

      // Try to continue from here.
      if (generator_object->IsJSAsyncFunctionObject()) {
        Handle<JSAsyncFunctionObject> async_function_object =
            Handle<JSAsyncFunctionObject>::cast(generator_object);
        promise = handle(async_function_object->promise(), isolate);
      } else {
        Handle<JSAsyncGeneratorObject> async_generator_object =
            Handle<JSAsyncGeneratorObject>::cast(generator_object);
        if (async_generator_object->queue().IsUndefined(isolate)) return;
        Handle<AsyncGeneratorRequest> async_generator_request(
            AsyncGeneratorRequest::cast(async_generator_object->queue()),
            isolate);
        promise = handle(JSPromise::cast(async_generator_request->promise()),
                         isolate);
      }
    } else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
                                 Builtins::kPromiseAllResolveElementClosure)) {
      Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
                                  isolate);
      Handle<Context> context(function->context(), isolate);
      Handle<JSFunction> combinator(context->native_context().promise_all(),
                                    isolate);
      builder->AppendPromiseCombinatorFrame(function, combinator,
                                            FrameArray::kIsPromiseAll, context);

      // Now peek into the Promise.all() resolve element context to
      // find the promise capability that's being resolved when all
      // the concurrent promises resolve.
      int const index =
          PromiseBuiltins::kPromiseAllResolveElementCapabilitySlot;
      Handle<PromiseCapability> capability(
          PromiseCapability::cast(context->get(index)), isolate);
      if (!capability->promise().IsJSPromise()) return;
      promise = handle(JSPromise::cast(capability->promise()), isolate);
    } else if (IsBuiltinFunction(isolate, reaction->reject_handler(),
                                 Builtins::kPromiseAnyRejectElementClosure)) {
      Handle<JSFunction> function(JSFunction::cast(reaction->reject_handler()),
                                  isolate);
      Handle<Context> context(function->context(), isolate);
      Handle<JSFunction> combinator(context->native_context().promise_any(),
                                    isolate);
      builder->AppendPromiseCombinatorFrame(function, combinator,
                                            FrameArray::kIsPromiseAny, context);

      // Now peek into the Promise.any() reject element context to
      // find the promise capability that's being resolved when any of
      // the concurrent promises resolve.
      int const index = PromiseBuiltins::kPromiseAnyRejectElementCapabilitySlot;
      Handle<PromiseCapability> capability(
          PromiseCapability::cast(context->get(index)), isolate);
      if (!capability->promise().IsJSPromise()) return;
      promise = handle(JSPromise::cast(capability->promise()), isolate);
    } else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
                                 Builtins::kPromiseCapabilityDefaultResolve)) {
      Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
                                  isolate);
      Handle<Context> context(function->context(), isolate);
      promise =
          handle(JSPromise::cast(context->get(PromiseBuiltins::kPromiseSlot)),
                 isolate);
    } else {
      // We have some generic promise chain here, so try to
      // continue with the chained promise on the reaction
      // (only works for native promise chains).
      Handle<HeapObject> promise_or_capability(
          reaction->promise_or_capability(), isolate);
      if (promise_or_capability->IsJSPromise()) {
        promise = Handle<JSPromise>::cast(promise_or_capability);
      } else if (promise_or_capability->IsPromiseCapability()) {
        Handle<PromiseCapability> capability =
            Handle<PromiseCapability>::cast(promise_or_capability);
        if (!capability->promise().IsJSPromise()) return;
        promise = handle(JSPromise::cast(capability->promise()), isolate);
      } else {
        // Otherwise the {promise_or_capability} must be undefined here.
        CHECK(promise_or_capability->IsUndefined(isolate));
        return;
      }
    }
  }
}

namespace {

struct CaptureStackTraceOptions {
  int limit;
  // 'filter_mode' and 'skip_mode' are somewhat orthogonal. 'filter_mode'
  // specifies whether to capture all frames or just frames in the same
  // security context, while 'skip_mode' allows skipping the first frame.
  FrameSkipMode skip_mode;
  FrameArrayBuilder::FrameFilterMode filter_mode;

  bool capture_builtin_exit_frames;
  bool capture_only_frames_subject_to_debugging;
  bool async_stack_trace;
};

Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
                                 CaptureStackTraceOptions options) {
  DisallowJavascriptExecution no_js(isolate);

  TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
                     "CaptureStackTrace", "maxFrameCount", options.limit);

  wasm::WasmCodeRefScope code_ref_scope;
  FrameArrayBuilder builder(isolate, options.skip_mode, options.limit, caller,
                            options.filter_mode);

  // Build the regular stack trace, and remember the last relevant
  // frame ID and inlined index (for the async stack trace handling
  // below, which starts from this last frame).
  for (StackFrameIterator it(isolate); !it.done() && !builder.full();
       it.Advance()) {
    StackFrame* const frame = it.frame();
    switch (frame->type()) {
      case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
      case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
      case StackFrame::OPTIMIZED:
      case StackFrame::INTERPRETED:
      case StackFrame::BUILTIN:
      case StackFrame::WASM: {
        // A standard frame may include many summarized frames (due to
        // inlining).
        std::vector<FrameSummary> frames;
        CommonFrame::cast(frame)->Summarize(&frames);
        for (size_t i = frames.size(); i-- != 0 && !builder.full();) {
          auto& summary = frames[i];
          if (options.capture_only_frames_subject_to_debugging &&
              !summary.is_subject_to_debugging()) {
            continue;
          }

          if (summary.IsJavaScript()) {
            //=========================================================
            // Handle a JavaScript frame.
            //=========================================================
            auto const& java_script = summary.AsJavaScript();
            builder.AppendJavaScriptFrame(java_script);
          } else if (summary.IsWasm()) {
            //=========================================================
            // Handle a Wasm frame.
            //=========================================================
            auto const& wasm = summary.AsWasm();
            builder.AppendWasmFrame(wasm);
          }
        }
        break;
      }

      case StackFrame::BUILTIN_EXIT:
        if (!options.capture_builtin_exit_frames) continue;

        // BuiltinExitFrames are not standard frames, so they do not have
        // Summarize(). However, they may have one JS frame worth showing.
        builder.AppendBuiltinExitFrame(BuiltinExitFrame::cast(frame));
        break;

      default:
        break;
    }
  }

  // If --async-stack-traces are enabled and the "current microtask" is a
  // PromiseReactionJobTask, we try to enrich the stack trace with async
  // frames.
  if (options.async_stack_trace) {
    Handle<Object> current_microtask = isolate->factory()->current_microtask();
    if (current_microtask->IsPromiseReactionJobTask()) {
      Handle<PromiseReactionJobTask> promise_reaction_job_task =
          Handle<PromiseReactionJobTask>::cast(current_microtask);
      // Check if the {reaction} has one of the known async function or
      // async generator continuations as its fulfill handler.
      if (IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
                            Builtins::kAsyncFunctionAwaitResolveClosure) ||
          IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
                            Builtins::kAsyncGeneratorAwaitResolveClosure) ||
          IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
                            Builtins::kAsyncGeneratorYieldResolveClosure) ||
          IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
                            Builtins::kAsyncFunctionAwaitRejectClosure) ||
          IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
                            Builtins::kAsyncGeneratorAwaitRejectClosure)) {
        // Now peek into the handlers' AwaitContext to get to
        // the JSGeneratorObject for the async function.
        Handle<Context> context(
            JSFunction::cast(promise_reaction_job_task->handler()).context(),
            isolate);
        Handle<JSGeneratorObject> generator_object(
            JSGeneratorObject::cast(context->extension()), isolate);
        if (generator_object->is_executing()) {
          if (generator_object->IsJSAsyncFunctionObject()) {
            Handle<JSAsyncFunctionObject> async_function_object =
                Handle<JSAsyncFunctionObject>::cast(generator_object);
            Handle<JSPromise> promise(async_function_object->promise(),
                                      isolate);
            CaptureAsyncStackTrace(isolate, promise, &builder);
          } else {
            Handle<JSAsyncGeneratorObject> async_generator_object =
                Handle<JSAsyncGeneratorObject>::cast(generator_object);
            Handle<Object> queue(async_generator_object->queue(), isolate);
            if (!queue->IsUndefined(isolate)) {
              Handle<AsyncGeneratorRequest> async_generator_request =
                  Handle<AsyncGeneratorRequest>::cast(queue);
              Handle<JSPromise> promise(
                  JSPromise::cast(async_generator_request->promise()), isolate);
              CaptureAsyncStackTrace(isolate, promise, &builder);
            }
          }
        }
      } else {
        // The {promise_reaction_job_task} doesn't belong to an await (or
        // yield inside an async generator), but we might still be able to
        // find an async frame if we follow along the chain of promises on
        // the {promise_reaction_job_task}.
        Handle<HeapObject> promise_or_capability(
            promise_reaction_job_task->promise_or_capability(), isolate);
        if (promise_or_capability->IsJSPromise()) {
          Handle<JSPromise> promise =
              Handle<JSPromise>::cast(promise_or_capability);
          CaptureAsyncStackTrace(isolate, promise, &builder);
        }
      }
    }
  }

  Handle<FixedArray> stack_trace = builder.GetElementsAsStackTraceFrameArray();
  TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
                   "CaptureStackTrace", "frameCount", stack_trace->length());
  return stack_trace;
}

}  // namespace

Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                                FrameSkipMode mode,
                                                Handle<Object> caller) {
  int limit;
  if (!GetStackTraceLimit(this, &limit)) return factory()->undefined_value();

  CaptureStackTraceOptions options;
  options.limit = limit;
  options.skip_mode = mode;
  options.capture_builtin_exit_frames = true;
  options.async_stack_trace = FLAG_async_stack_traces;
  options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
  options.capture_only_frames_subject_to_debugging = false;

  return CaptureStackTrace(this, caller, options);
}

MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
    Handle<JSReceiver> error_object) {
  if (capture_stack_trace_for_uncaught_exceptions_) {
    // Capture stack trace for a detailed exception message.
    Handle<Name> key = factory()->detailed_stack_trace_symbol();
    Handle<FixedArray> stack_trace = CaptureCurrentStackTrace(
        stack_trace_for_uncaught_exceptions_frame_limit_,
        stack_trace_for_uncaught_exceptions_options_);
    RETURN_ON_EXCEPTION(
        this,
        Object::SetProperty(this, error_object, key, stack_trace,
                            StoreOrigin::kMaybeKeyed,
                            Just(ShouldThrow::kThrowOnError)),
        JSReceiver);
  }
  return error_object;
}

MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
    Handle<JSReceiver> error_object, FrameSkipMode mode,
    Handle<Object> caller) {
  // Capture stack trace for simple stack trace string formatting.
  Handle<Name> key = factory()->stack_trace_symbol();
  Handle<Object> stack_trace =
      CaptureSimpleStackTrace(error_object, mode, caller);
  RETURN_ON_EXCEPTION(this,
                      Object::SetProperty(this, error_object, key, stack_trace,
                                          StoreOrigin::kMaybeKeyed,
                                          Just(ShouldThrow::kThrowOnError)),
                      JSReceiver);
  return error_object;
}

Handle<FixedArray> Isolate::GetDetailedStackTrace(
    Handle<JSObject> error_object) {
  Handle<Name> key_detailed = factory()->detailed_stack_trace_symbol();
  Handle<Object> stack_trace =
      JSReceiver::GetDataProperty(error_object, key_detailed);
  if (stack_trace->IsFixedArray()) return Handle<FixedArray>::cast(stack_trace);
  return Handle<FixedArray>();
}

Address Isolate::GetAbstractPC(int* line, int* column) {
  JavaScriptFrameIterator it(this);

  if (it.done()) {
    *line = -1;
    *column = -1;
    return kNullAddress;
  }
  JavaScriptFrame* frame = it.frame();
  DCHECK(!frame->is_builtin());

  Handle<SharedFunctionInfo> shared = handle(frame->function().shared(), this);
  SharedFunctionInfo::EnsureSourcePositionsAvailable(this, shared);
  int position = frame->position();

  Object maybe_script = frame->function().shared().script();
  if (maybe_script.IsScript()) {
    Handle<Script> script(Script::cast(maybe_script), this);
    Script::PositionInfo info;
    Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
    *line = info.line + 1;
    *column = info.column + 1;
  } else {
    *line = position;
    *column = -1;
  }

  if (frame->is_interpreted()) {
    InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
    Address bytecode_start =
        iframe->GetBytecodeArray().GetFirstBytecodeAddress();
    return bytecode_start + iframe->GetBytecodeOffset();
  }

  return frame->pc();
}

Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
    int frame_limit, StackTrace::StackTraceOptions stack_trace_options) {
  CaptureStackTraceOptions options;
  options.limit = Max(frame_limit, 0);  // Ensure no negative values.
  options.skip_mode = SKIP_NONE;
  options.capture_builtin_exit_frames = false;
  options.async_stack_trace = false;
  options.filter_mode =
      (stack_trace_options & StackTrace::kExposeFramesAcrossSecurityOrigins)
          ? FrameArrayBuilder::ALL
          : FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
  options.capture_only_frames_subject_to_debugging = true;

  return Handle<FixedArray>::cast(
      CaptureStackTrace(this, factory()->undefined_value(), options));
}

void Isolate::PrintStack(FILE* out, PrintStackMode mode) {
  if (stack_trace_nesting_level_ == 0) {
    stack_trace_nesting_level_++;
    StringStream::ClearMentionedObjectCache(this);
    HeapStringAllocator allocator;
    StringStream accumulator(&allocator);
    incomplete_message_ = &accumulator;
    PrintStack(&accumulator, mode);
    accumulator.OutputToFile(out);
    InitializeLoggingAndCounters();
    accumulator.Log(this);
    incomplete_message_ = nullptr;
    stack_trace_nesting_level_ = 0;
  } else if (stack_trace_nesting_level_ == 1) {
    stack_trace_nesting_level_++;
    base::OS::PrintError(
        "\n\nAttempt to print stack while printing stack (double fault)\n");
    base::OS::PrintError(
        "If you are lucky you may find a partial stack dump on stdout.\n\n");
    incomplete_message_->OutputToFile(out);
  }
}

static void PrintFrames(Isolate* isolate, StringStream* accumulator,
                        StackFrame::PrintMode mode) {
  StackFrameIterator it(isolate);
  for (int i = 0; !it.done(); it.Advance()) {
    it.frame()->Print(accumulator, mode, i++);
  }
}

void Isolate::PrintStack(StringStream* accumulator, PrintStackMode mode) {
  HandleScope scope(this);
  wasm::WasmCodeRefScope wasm_code_ref_scope;
  DCHECK(accumulator->IsMentionedObjectCacheClear(this));

  // Avoid printing anything if there are no frames.
  if (c_entry_fp(thread_local_top()) == 0) return;

  accumulator->Add(
      "\n==== JS stack trace =========================================\n\n");
  PrintFrames(this, accumulator, StackFrame::OVERVIEW);
  if (mode == kPrintStackVerbose) {
    accumulator->Add(
        "\n==== Details ================================================\n\n");
    PrintFrames(this, accumulator, StackFrame::DETAILS);
    accumulator->PrintMentionedObjectCache(this);
  }
  accumulator->Add("=====================\n\n");
}

void Isolate::SetFailedAccessCheckCallback(
    v8::FailedAccessCheckCallback callback) {
  thread_local_top()->failed_access_check_callback_ = callback;
}

void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
  if (!thread_local_top()->failed_access_check_callback_) {
    return ScheduleThrow(*factory()->NewTypeError(MessageTemplate::kNoAccess));
  }

  DCHECK(receiver->IsAccessCheckNeeded());
  DCHECK(!context().is_null());

  // Get the data object from access check info.
  HandleScope scope(this);
  Handle<Object> data;
  {
    DisallowHeapAllocation no_gc;
    AccessCheckInfo access_check_info = AccessCheckInfo::Get(this, receiver);
    if (access_check_info.is_null()) {
      AllowHeapAllocation doesnt_matter_anymore;
      return ScheduleThrow(
          *factory()->NewTypeError(MessageTemplate::kNoAccess));
    }
    data = handle(access_check_info.data(), this);
  }

  // Leaving JavaScript.
  VMState<EXTERNAL> state(this);
  thread_local_top()->failed_access_check_callback_(
      v8::Utils::ToLocal(receiver), v8::ACCESS_HAS, v8::Utils::ToLocal(data));
}
1359
MayAccess(Handle<Context> accessing_context,Handle<JSObject> receiver)1360 bool Isolate::MayAccess(Handle<Context> accessing_context,
1361 Handle<JSObject> receiver) {
1362 DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
1363
1364 // Check for compatibility between the security tokens in the
1365 // current lexical context and the accessed object.
1366
1367 // During bootstrapping, callback functions are not enabled yet.
1368 if (bootstrapper()->IsActive()) return true;
1369 {
1370 DisallowHeapAllocation no_gc;
1371
1372 if (receiver->IsJSGlobalProxy()) {
1373 Object receiver_context = JSGlobalProxy::cast(*receiver).native_context();
1374 if (!receiver_context.IsContext()) return false;
1375
1376 // Get the native context of the current top context,
1377 // avoiding Isolate::native_context() because it uses a Handle.
1378 Context native_context =
1379 accessing_context->global_object().native_context();
1380 if (receiver_context == native_context) return true;
1381
1382 if (Context::cast(receiver_context).security_token() ==
1383 native_context.security_token())
1384 return true;
1385 }
1386 }
1387
1388 HandleScope scope(this);
1389 Handle<Object> data;
1390 v8::AccessCheckCallback callback = nullptr;
1391 {
1392 DisallowHeapAllocation no_gc;
1393 AccessCheckInfo access_check_info = AccessCheckInfo::Get(this, receiver);
1394 if (access_check_info.is_null()) return false;
1395 Object fun_obj = access_check_info.callback();
1396 callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
1397 data = handle(access_check_info.data(), this);
1398 }
1399
1400 LOG(this, ApiSecurityCheck());
1401
1402 {
1403 // Leaving JavaScript.
1404 VMState<EXTERNAL> state(this);
1405 return callback(v8::Utils::ToLocal(accessing_context),
1406 v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(data));
1407 }
1408 }
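// Illustrative sketch (not part of this file): a hypothetical security
// callback with the shape invoked above. Returning false makes MayAccess()
// deny access, which in turn leads to ReportFailedAccessCheck().
#if 0
#include "include/v8.h"

bool DenyAllAccessCheck(v8::Local<v8::Context> accessing_context,
                        v8::Local<v8::Object> accessed_object,
                        v8::Local<v8::Value> data) {
  return false;  // deny every cross-context access, for illustration only
}

// Installed on the global object template of a context, assuming a valid
// v8::Isolate* `isolate`:
//   v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
//   tmpl->SetAccessCheckCallback(&DenyAllAccessCheck);
#endif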
1409
1410 Object Isolate::StackOverflow() {
1411 if (FLAG_correctness_fuzzer_suppressions) {
1412 FATAL("Aborting on stack overflow");
1413 }
1414
1415 DisallowJavascriptExecution no_js(this);
1416 HandleScope scope(this);
1417
1418 Handle<JSFunction> fun = range_error_function();
1419 Handle<Object> msg = factory()->NewStringFromAsciiChecked(
1420 MessageFormatter::TemplateString(MessageTemplate::kStackOverflow));
1421 Handle<Object> no_caller;
1422 Handle<Object> exception;
1423 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
1424 this, exception,
1425 ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller,
1426 ErrorUtils::StackTraceCollection::kSimple));
1427
1428 Throw(*exception);
1429
1430 #ifdef VERIFY_HEAP
1431 if (FLAG_verify_heap && FLAG_stress_compaction) {
1432 heap()->CollectAllGarbage(Heap::kNoGCFlags,
1433 GarbageCollectionReason::kTesting);
1434 }
1435 #endif // VERIFY_HEAP
1436
1437 return ReadOnlyRoots(heap()).exception();
1438 }
1439
1440 Object Isolate::ThrowAt(Handle<JSObject> exception, MessageLocation* location) {
1441 Handle<Name> key_start_pos = factory()->error_start_pos_symbol();
1442 Object::SetProperty(this, exception, key_start_pos,
1443 handle(Smi::FromInt(location->start_pos()), this),
1444 StoreOrigin::kMaybeKeyed,
1445 Just(ShouldThrow::kThrowOnError))
1446 .Check();
1447
1448 Handle<Name> key_end_pos = factory()->error_end_pos_symbol();
1449 Object::SetProperty(this, exception, key_end_pos,
1450 handle(Smi::FromInt(location->end_pos()), this),
1451 StoreOrigin::kMaybeKeyed,
1452 Just(ShouldThrow::kThrowOnError))
1453 .Check();
1454
1455 Handle<Name> key_script = factory()->error_script_symbol();
1456 Object::SetProperty(this, exception, key_script, location->script(),
1457 StoreOrigin::kMaybeKeyed,
1458 Just(ShouldThrow::kThrowOnError))
1459 .Check();
1460
1461 return ThrowInternal(*exception, location);
1462 }
1463
1464 Object Isolate::TerminateExecution() {
1465 return Throw(ReadOnlyRoots(this).termination_exception());
1466 }
1467
1468 void Isolate::CancelTerminateExecution() {
1469 if (try_catch_handler()) {
1470 try_catch_handler()->has_terminated_ = false;
1471 }
1472 if (has_pending_exception() &&
1473 pending_exception() == ReadOnlyRoots(this).termination_exception()) {
1474 thread_local_top()->external_caught_exception_ = false;
1475 clear_pending_exception();
1476 }
1477 if (has_scheduled_exception() &&
1478 scheduled_exception() == ReadOnlyRoots(this).termination_exception()) {
1479 thread_local_top()->external_caught_exception_ = false;
1480 clear_scheduled_exception();
1481 }
1482 }
1483
1484 void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
1485 ExecutionAccess access(this);
1486 api_interrupts_queue_.push(InterruptEntry(callback, data));
1487 stack_guard()->RequestApiInterrupt();
1488 }
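// Illustrative sketch (an assumption of this edit, not V8 code): how an
// embedder thread that does not own the isolate might use the interrupt
// queue filled above. The names `g_dump_requested` and `RequestHeapDump`
// are hypothetical.
#if 0
#include <atomic>
#include "include/v8.h"

std::atomic<bool> g_dump_requested{false};

// Runs on the isolate's own thread at the next interrupt check.
void RequestHeapDump(v8::Isolate* isolate, void* /*data*/) {
  g_dump_requested.store(true, std::memory_order_relaxed);
}

// From any thread: isolate->RequestInterrupt(&RequestHeapDump, nullptr);
#endif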
1489
1490 void Isolate::InvokeApiInterruptCallbacks() {
1491 RuntimeCallTimerScope runtimeTimer(
1492 this, RuntimeCallCounterId::kInvokeApiInterruptCallbacks);
1493 // Note: the callback below must be invoked outside of the execution access lock.
1494 while (true) {
1495 InterruptEntry entry;
1496 {
1497 ExecutionAccess access(this);
1498 if (api_interrupts_queue_.empty()) return;
1499 entry = api_interrupts_queue_.front();
1500 api_interrupts_queue_.pop();
1501 }
1502 VMState<EXTERNAL> state(this);
1503 HandleScope handle_scope(this);
1504 entry.first(reinterpret_cast<v8::Isolate*>(this), entry.second);
1505 }
1506 }
1507
1508 namespace {
1509
1510 void ReportBootstrappingException(Handle<Object> exception,
1511 MessageLocation* location) {
1512 base::OS::PrintError("Exception thrown during bootstrapping\n");
1513 if (location == nullptr || location->script().is_null()) return;
1514 // We are bootstrapping and caught an error where the location is set
1515 // and we have a script for the location.
1516 // In this case we could have an extension (or an internal error
1517 // somewhere) and we print out the line number at which the error occurred
1518 // to the console for easier debugging.
1519 int line_number =
1520 location->script()->GetLineNumber(location->start_pos()) + 1;
1521 if (exception->IsString() && location->script()->name().IsString()) {
1522 base::OS::PrintError(
1523 "Extension or internal compilation error: %s in %s at line %d.\n",
1524 String::cast(*exception).ToCString().get(),
1525 String::cast(location->script()->name()).ToCString().get(),
1526 line_number);
1527 } else if (location->script()->name().IsString()) {
1528 base::OS::PrintError(
1529 "Extension or internal compilation error in %s at line %d.\n",
1530 String::cast(location->script()->name()).ToCString().get(),
1531 line_number);
1532 } else if (exception->IsString()) {
1533 base::OS::PrintError("Extension or internal compilation error: %s.\n",
1534 String::cast(*exception).ToCString().get());
1535 } else {
1536 base::OS::PrintError("Extension or internal compilation error.\n");
1537 }
1538 #ifdef OBJECT_PRINT
1539 // Since comments and empty lines have been stripped from the source of
1540 // builtins, print the actual source here so that line numbers match.
1541 if (location->script()->source().IsString()) {
1542 Handle<String> src(String::cast(location->script()->source()),
1543 location->script()->GetIsolate());
1544 PrintF("Failing script:");
1545 int len = src->length();
1546 if (len == 0) {
1547 PrintF(" <not available>\n");
1548 } else {
1549 PrintF("\n");
1550 int line_number = 1;
1551 PrintF("%5d: ", line_number);
1552 for (int i = 0; i < len; i++) {
1553 uint16_t character = src->Get(i);
1554 PrintF("%c", character);
1555 if (character == '\n' && i < len - 2) {
1556 PrintF("%5d: ", ++line_number);
1557 }
1558 }
1559 PrintF("\n");
1560 }
1561 }
1562 #endif
1563 }
1564
1565 } // anonymous namespace
1566
1567 Handle<JSMessageObject> Isolate::CreateMessageOrAbort(
1568 Handle<Object> exception, MessageLocation* location) {
1569 Handle<JSMessageObject> message_obj = CreateMessage(exception, location);
1570
1571 // If the abort-on-uncaught-exception flag is specified, and if the
1572 // embedder didn't specify a custom uncaught exception callback,
1573 // or if the custom callback determined that V8 should abort, then
1574 // abort.
1575 if (FLAG_abort_on_uncaught_exception) {
1576 CatchType prediction = PredictExceptionCatcher();
1577 if ((prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) &&
1578 (!abort_on_uncaught_exception_callback_ ||
1579 abort_on_uncaught_exception_callback_(
1580 reinterpret_cast<v8::Isolate*>(this)))) {
1581 // Prevent endless recursion.
1582 FLAG_abort_on_uncaught_exception = false;
1583 // This flag is intended for use by JavaScript developers, so
1584 // print a user-friendly stack trace (not an internal one).
1585 PrintF(stderr, "%s\n\nFROM\n",
1586 MessageHandler::GetLocalizedMessage(this, message_obj).get());
1587 PrintCurrentStackTrace(stderr);
1588 base::OS::Abort();
1589 }
1590 }
1591
1592 return message_obj;
1593 }
1594
1595 Object Isolate::ThrowInternal(Object raw_exception, MessageLocation* location) {
1596 DCHECK(!has_pending_exception());
1597
1598 HandleScope scope(this);
1599 Handle<Object> exception(raw_exception, this);
1600
1601 if (FLAG_print_all_exceptions) {
1602 printf("=========================================================\n");
1603 printf("Exception thrown:\n");
1604 if (location) {
1605 Handle<Script> script = location->script();
1606 Handle<Object> name(script->GetNameOrSourceURL(), this);
1607 printf("at ");
1608 if (name->IsString() && String::cast(*name).length() > 0)
1609 String::cast(*name).PrintOn(stdout);
1610 else
1611 printf("<anonymous>");
1612 // Script::GetLineNumber and Script::GetColumnNumber can allocate on the
1613 // heap to initialize the line_ends array, so be careful when calling them.
1614 #ifdef DEBUG
1615 if (AllowHeapAllocation::IsAllowed() &&
1616 AllowGarbageCollection::IsAllowed()) {
1617 #else
1618 if ((false)) {
1619 #endif
1620 printf(", %d:%d - %d:%d\n",
1621 Script::GetLineNumber(script, location->start_pos()) + 1,
1622 Script::GetColumnNumber(script, location->start_pos()),
1623 Script::GetLineNumber(script, location->end_pos()) + 1,
1624 Script::GetColumnNumber(script, location->end_pos()));
1625 // Make sure to update the raw exception pointer in case it moved.
1626 raw_exception = *exception;
1627 } else {
1628 printf(", line %d\n", script->GetLineNumber(location->start_pos()) + 1);
1629 }
1630 }
1631 raw_exception.Print();
1632 printf("Stack Trace:\n");
1633 PrintStack(stdout);
1634 printf("=========================================================\n");
1635 }
1636
1637 // Determine whether a message needs to be created for the given exception
1638 // depending on the following criteria:
1639 // 1) External v8::TryCatch missing: Always create a message because any
1640 // JavaScript handler for a finally-block might re-throw to top-level.
1641 // 2) External v8::TryCatch exists: Only create a message if the handler
1642 // captures messages or is verbose (which reports despite the catch).
1643 // 3) ReThrow from v8::TryCatch: The message from a previous throw still
1644 // exists and we preserve it instead of creating a new message.
1645 bool requires_message = try_catch_handler() == nullptr ||
1646 try_catch_handler()->is_verbose_ ||
1647 try_catch_handler()->capture_message_;
1648 bool rethrowing_message = thread_local_top()->rethrowing_message_;
1649
1650 thread_local_top()->rethrowing_message_ = false;
1651
1652 // Notify debugger of exception.
1653 if (is_catchable_by_javascript(raw_exception)) {
1654 base::Optional<Object> maybe_exception = debug()->OnThrow(exception);
1655 if (maybe_exception.has_value()) {
1656 return *maybe_exception;
1657 }
1658 }
1659
1660 // Generate the message if required.
1661 if (requires_message && !rethrowing_message) {
1662 MessageLocation computed_location;
1663 // If no location was specified we try to use a computed one instead.
1664 if (location == nullptr && ComputeLocation(&computed_location)) {
1665 location = &computed_location;
1666 }
1667 if (bootstrapper()->IsActive()) {
1668 // It's not safe to try to make message objects or collect stack traces
1669 // while the bootstrapper is active since the infrastructure may not have
1670 // been properly initialized.
1671 ReportBootstrappingException(exception, location);
1672 } else {
1673 Handle<Object> message_obj = CreateMessageOrAbort(exception, location);
1674 thread_local_top()->pending_message_obj_ = *message_obj;
1675 }
1676 }
1677
1678 // Set the exception being thrown.
1679 set_pending_exception(*exception);
1680 return ReadOnlyRoots(heap()).exception();
1681 }
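// Illustrative sketch tied to criterion (2) in the comment above (an
// assumption of this edit, not code from this file): with an external
// v8::TryCatch on the stack, a message object is only created if the handler
// is verbose or captures messages.
#if 0
#include "include/v8.h"

void RunWithVerboseHandler(v8::Isolate* isolate) {
  v8::TryCatch try_catch(isolate);
  try_catch.SetVerbose(true);  // report the message despite the catch
  // ... compile and run a script that throws ...
  if (try_catch.HasCaught()) {
    v8::Local<v8::Message> message = try_catch.Message();
    // `message` is non-empty because the handler was verbose.
  }
}
#endif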
1682
1683 Object Isolate::ReThrow(Object exception) {
1684 DCHECK(!has_pending_exception());
1685
1686 // Set the exception being re-thrown.
1687 set_pending_exception(exception);
1688 return ReadOnlyRoots(heap()).exception();
1689 }
1690
1691 Object Isolate::UnwindAndFindHandler() {
1692 Object exception = pending_exception();
1693
1694 auto FoundHandler = [&](Context context, Address instruction_start,
1695 intptr_t handler_offset,
1696 Address constant_pool_address, Address handler_sp,
1697 Address handler_fp) {
1698 // Store information to be consumed by the CEntry.
1699 thread_local_top()->pending_handler_context_ = context;
1700 thread_local_top()->pending_handler_entrypoint_ =
1701 instruction_start + handler_offset;
1702 thread_local_top()->pending_handler_constant_pool_ = constant_pool_address;
1703 thread_local_top()->pending_handler_fp_ = handler_fp;
1704 thread_local_top()->pending_handler_sp_ = handler_sp;
1705
1706 // Return and clear pending exception. The contract is that:
1707 // (1) the pending exception is stored in one place (no duplication), and
1708 // (2) within generated-code land, that one place is the return register.
1709 // If/when we unwind back into C++ (returning to the JSEntry stub,
1710 // or to Execution::CallWasm), the returned exception will be sent
1711 // back to isolate->set_pending_exception(...).
1712 clear_pending_exception();
1713 return exception;
1714 };
1715
1716 // Termination exceptions require special handling: they are uncatchable by
1717 // JavaScript and Wasm code, so we unwind the handlers until the top ENTRY handler is found.
1718 bool catchable_by_js = is_catchable_by_javascript(exception);
1719 bool catchable_by_wasm = is_catchable_by_wasm(exception);
1720
1721 // Compute handler and stack unwinding information by performing a full walk
1722 // over the stack and dispatching according to the frame type.
1723 for (StackFrameIterator iter(this);; iter.Advance()) {
1724 // Handler must exist.
1725 DCHECK(!iter.done());
1726
1727 StackFrame* frame = iter.frame();
1728
1729 switch (frame->type()) {
1730 case StackFrame::ENTRY:
1731 case StackFrame::CONSTRUCT_ENTRY: {
1732 // For JSEntry frames we always have a handler.
1733 StackHandler* handler = frame->top_handler();
1734
1735 // Restore the next handler.
1736 thread_local_top()->handler_ = handler->next_address();
1737
1738 // Gather information from the handler.
1739 Code code = frame->LookupCode();
1740 HandlerTable table(code);
1741 return FoundHandler(Context(), code.InstructionStart(),
1742 table.LookupReturn(0), code.constant_pool(),
1743 handler->address() + StackHandlerConstants::kSize,
1744 0);
1745 }
1746
1747 case StackFrame::C_WASM_ENTRY: {
1748 StackHandler* handler = frame->top_handler();
1749 thread_local_top()->handler_ = handler->next_address();
1750 Code code = frame->LookupCode();
1751 HandlerTable table(code);
1752 Address instruction_start = code.InstructionStart();
1753 int return_offset = static_cast<int>(frame->pc() - instruction_start);
1754 int handler_offset = table.LookupReturn(return_offset);
1755 DCHECK_NE(-1, handler_offset);
1756 // Compute the stack pointer from the frame pointer. This ensures that
1757 // argument slots on the stack are dropped as returning would.
1758 Address return_sp = frame->fp() +
1759 StandardFrameConstants::kFixedFrameSizeAboveFp -
1760 code.stack_slots() * kSystemPointerSize;
1761 return FoundHandler(Context(), instruction_start, handler_offset,
1762 code.constant_pool(), return_sp, frame->fp());
1763 }
1764
1765 case StackFrame::WASM: {
1766 if (trap_handler::IsThreadInWasm()) {
1767 trap_handler::ClearThreadInWasm();
1768 }
1769
1770 if (!catchable_by_wasm) break;
1771
1772 // For WebAssembly frames we perform a lookup in the handler table.
1773 // This code ref scope is here to avoid a check failure when looking up
1774 // the code. It's not actually necessary to keep the code alive as it's
1775 // currently being executed.
1776 wasm::WasmCodeRefScope code_ref_scope;
1777 WasmFrame* wasm_frame = static_cast<WasmFrame*>(frame);
1778 wasm::WasmCode* wasm_code =
1779 wasm_engine()->code_manager()->LookupCode(frame->pc());
1780 int offset = wasm_frame->LookupExceptionHandlerInTable();
1781 if (offset < 0) break;
1782 // Compute the stack pointer from the frame pointer. This ensures that
1783 // argument slots on the stack are dropped as returning would.
1784 Address return_sp = frame->fp() +
1785 StandardFrameConstants::kFixedFrameSizeAboveFp -
1786 wasm_code->stack_slots() * kSystemPointerSize;
1787
1788 // This is going to be handled by Wasm, so we need to set the TLS flag
1789 // again. It was cleared above assuming the frame would be unwound.
1790 trap_handler::SetThreadInWasm();
1791
1792 return FoundHandler(Context(), wasm_code->instruction_start(), offset,
1793 wasm_code->constant_pool(), return_sp, frame->fp());
1794 }
1795
1796 case StackFrame::WASM_COMPILE_LAZY: {
1797 // Can only fail directly on invocation. This happens if an invalid
1798 // function was validated lazily.
1799 DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
1800 trap_handler::IsThreadInWasm());
1801 DCHECK(FLAG_wasm_lazy_validation);
1802 trap_handler::ClearThreadInWasm();
1803 break;
1804 }
1805
1806 case StackFrame::OPTIMIZED: {
1807 // For optimized frames we perform a lookup in the handler table.
1808 if (!catchable_by_js) break;
1809 OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
1810 Code code = frame->LookupCode();
1811 int offset = js_frame->LookupExceptionHandlerInTable(nullptr, nullptr);
1812 if (offset < 0) break;
1813 // Compute the stack pointer from the frame pointer. This ensures
1814 // that argument slots on the stack are dropped as returning would.
1815 Address return_sp = frame->fp() +
1816 StandardFrameConstants::kFixedFrameSizeAboveFp -
1817 code.stack_slots() * kSystemPointerSize;
1818
1819 // TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED,
1820 // but do not have a code kind of TURBOFAN.
1821 if (CodeKindCanDeoptimize(code.kind()) &&
1822 code.marked_for_deoptimization()) {
1823 // If the target code is lazy deoptimized, we jump to the original
1824 // return address, but we make a note that we are throwing, so
1825 // that the deoptimizer can do the right thing.
1826 offset = static_cast<int>(frame->pc() - code.entry());
1827 set_deoptimizer_lazy_throw(true);
1828 }
1829
1830 return FoundHandler(Context(), code.InstructionStart(), offset,
1831 code.constant_pool(), return_sp, frame->fp());
1832 }
1833
1834 case StackFrame::STUB: {
1835 // Some stubs are able to handle exceptions.
1836 if (!catchable_by_js) break;
1837 StubFrame* stub_frame = static_cast<StubFrame*>(frame);
1838 #ifdef DEBUG
1839 wasm::WasmCodeRefScope code_ref_scope;
1840 DCHECK_NULL(wasm_engine()->code_manager()->LookupCode(frame->pc()));
1841 #endif // DEBUG
1842 Code code = stub_frame->LookupCode();
1843 if (!code.IsCode() || code.kind() != CodeKind::BUILTIN ||
1844 !code.has_handler_table() || !code.is_turbofanned()) {
1845 break;
1846 }
1847
1848 int offset = stub_frame->LookupExceptionHandlerInTable();
1849 if (offset < 0) break;
1850
1851 // Compute the stack pointer from the frame pointer. This ensures
1852 // that argument slots on the stack are dropped as returning would.
1853 Address return_sp = frame->fp() +
1854 StandardFrameConstants::kFixedFrameSizeAboveFp -
1855 code.stack_slots() * kSystemPointerSize;
1856
1857 return FoundHandler(Context(), code.InstructionStart(), offset,
1858 code.constant_pool(), return_sp, frame->fp());
1859 }
1860
1861 case StackFrame::INTERPRETED: {
1862 // For interpreted frames we perform a range lookup in the handler table.
1863 if (!catchable_by_js) break;
1864 InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
1865 int register_slots = InterpreterFrameConstants::RegisterStackSlotCount(
1866 js_frame->GetBytecodeArray().register_count());
1867 int context_reg = 0; // Will contain register index holding context.
1868 int offset =
1869 js_frame->LookupExceptionHandlerInTable(&context_reg, nullptr);
1870 if (offset < 0) break;
1871 // Compute the stack pointer from the frame pointer. This ensures that
1872 // argument slots on the stack are dropped as returning would.
1873 // Note: This is only needed for interpreted frames that have been
1874 // materialized by the deoptimizer. If there is a handler frame
1875 // in between then {frame->sp()} would already be correct.
1876 Address return_sp = frame->fp() -
1877 InterpreterFrameConstants::kFixedFrameSizeFromFp -
1878 register_slots * kSystemPointerSize;
1879
1880 // Patch the bytecode offset in the interpreted frame to reflect the
1881 // position of the exception handler. The special builtin below will
1882 // take care of continuing to dispatch at that position. Also restore
1883 // the correct context for the handler from the interpreter register.
1884 Context context =
1885 Context::cast(js_frame->ReadInterpreterRegister(context_reg));
1886 js_frame->PatchBytecodeOffset(static_cast<int>(offset));
1887
1888 Code code =
1889 builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
1890 return FoundHandler(context, code.InstructionStart(), 0,
1891 code.constant_pool(), return_sp, frame->fp());
1892 }
1893
1894 case StackFrame::BUILTIN:
1895 // For builtin frames we are guaranteed not to find a handler.
1896 if (catchable_by_js) {
1897 CHECK_EQ(-1, BuiltinFrame::cast(frame)->LookupExceptionHandlerInTable(
1898 nullptr, nullptr));
1899 }
1900 break;
1901
1902 case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
1903 // Builtin continuation frames with catch can handle exceptions.
1904 if (!catchable_by_js) break;
1905 JavaScriptBuiltinContinuationWithCatchFrame* js_frame =
1906 JavaScriptBuiltinContinuationWithCatchFrame::cast(frame);
1907 js_frame->SetException(exception);
1908
1909 // Reconstruct the stack pointer from the frame pointer.
1910 Address return_sp = js_frame->fp() - js_frame->GetSPToFPDelta();
1911 Code code = js_frame->LookupCode();
1912 return FoundHandler(Context(), code.InstructionStart(), 0,
1913 code.constant_pool(), return_sp, frame->fp());
1914 } break;
1915
1916 default:
1917 // All other frame types cannot handle exceptions.
1918 break;
1919 }
1920
1921 if (frame->is_optimized()) {
1922 // Remove per-frame stored materialized objects.
1923 bool removed = materialized_object_store_->Remove(frame->fp());
1924 USE(removed);
1925 // If there were any materialized objects, the code should be
1926 // marked for deopt.
1927 DCHECK_IMPLIES(removed, frame->LookupCode().marked_for_deoptimization());
1928 }
1929 }
1930
1931 UNREACHABLE();
1932 }
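// Illustrative arithmetic for the recurring return_sp computation above,
// assuming a 64-bit target (kSystemPointerSize == 8) and a fixed frame of
// saved fp plus return address above fp. All concrete values are made up.
#if 0
#include <cstdint>

constexpr uint64_t kFp = 0x00007ffcd0000040;    // hypothetical frame pointer
constexpr uint64_t kFixedSizeAboveFp = 2 * 8;   // saved fp + return address
constexpr int kStackSlots = 6;                  // taken from the Code object
constexpr uint64_t kReturnSp = kFp + kFixedSizeAboveFp - kStackSlots * 8;
static_assert(kReturnSp == kFp - 32,
              "the handler resumes with the frame's spill slots dropped, "
              "exactly as a normal return would leave the stack");
#endif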
1933
1934 namespace {
1935 HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
1936 HandlerTable::CatchPrediction prediction;
1937 if (frame->is_optimized()) {
1938 if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) {
1939 // This optimized frame will catch. Its handler table does not include
1940 // exception prediction, and we need to use the corresponding handler
1941 // tables on the unoptimized code objects.
1942 std::vector<FrameSummary> summaries;
1943 frame->Summarize(&summaries);
1944 for (size_t i = summaries.size(); i != 0; i--) {
1945 const FrameSummary& summary = summaries[i - 1];
1946 Handle<AbstractCode> code = summary.AsJavaScript().abstract_code();
1947 if (code->IsCode() && code->kind() == CodeKind::BUILTIN) {
1948 prediction = code->GetCode().GetBuiltinCatchPrediction();
1949 if (prediction == HandlerTable::UNCAUGHT) continue;
1950 return prediction;
1951 }
1952
1953 // Must have been constructed from a bytecode array.
1954 CHECK_EQ(CodeKind::INTERPRETED_FUNCTION, code->kind());
1955 int code_offset = summary.code_offset();
1956 HandlerTable table(code->GetBytecodeArray());
1957 int index = table.LookupRange(code_offset, nullptr, &prediction);
1958 if (index <= 0) continue;
1959 if (prediction == HandlerTable::UNCAUGHT) continue;
1960 return prediction;
1961 }
1962 }
1963 } else if (frame->LookupExceptionHandlerInTable(nullptr, &prediction) > 0) {
1964 return prediction;
1965 }
1966 return HandlerTable::UNCAUGHT;
1967 }
1968
1969 Isolate::CatchType ToCatchType(HandlerTable::CatchPrediction prediction) {
1970 switch (prediction) {
1971 case HandlerTable::UNCAUGHT:
1972 return Isolate::NOT_CAUGHT;
1973 case HandlerTable::CAUGHT:
1974 return Isolate::CAUGHT_BY_JAVASCRIPT;
1975 case HandlerTable::PROMISE:
1976 return Isolate::CAUGHT_BY_PROMISE;
1977 case HandlerTable::DESUGARING:
1978 return Isolate::CAUGHT_BY_DESUGARING;
1979 case HandlerTable::UNCAUGHT_ASYNC_AWAIT:
1980 case HandlerTable::ASYNC_AWAIT:
1981 return Isolate::CAUGHT_BY_ASYNC_AWAIT;
1982 default:
1983 UNREACHABLE();
1984 }
1985 }
1986 } // anonymous namespace
1987
1988 Isolate::CatchType Isolate::PredictExceptionCatcher() {
1989 Address external_handler = thread_local_top()->try_catch_handler_address();
1990 if (IsExternalHandlerOnTop(Object())) return CAUGHT_BY_EXTERNAL;
1991
1992 // Search for an exception handler by performing a full walk over the stack.
1993 for (StackFrameIterator iter(this); !iter.done(); iter.Advance()) {
1994 StackFrame* frame = iter.frame();
1995
1996 switch (frame->type()) {
1997 case StackFrame::ENTRY:
1998 case StackFrame::CONSTRUCT_ENTRY: {
1999 Address entry_handler = frame->top_handler()->next_address();
2000 // The exception has been externally caught if and only if there is an
2001 // external handler which is on top of the top-most JS_ENTRY handler.
2002 if (external_handler != kNullAddress &&
2003 !try_catch_handler()->is_verbose_) {
2004 if (entry_handler == kNullAddress ||
2005 entry_handler > external_handler) {
2006 return CAUGHT_BY_EXTERNAL;
2007 }
2008 }
2009 } break;
2010
2011 // For JavaScript frames we perform a lookup in the handler table.
2012 case StackFrame::OPTIMIZED:
2013 case StackFrame::INTERPRETED:
2014 case StackFrame::BUILTIN: {
2015 JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
2016 Isolate::CatchType prediction = ToCatchType(PredictException(js_frame));
2017 if (prediction == NOT_CAUGHT) break;
2018 return prediction;
2019 } break;
2020
2021 case StackFrame::STUB: {
2022 Handle<Code> code(frame->LookupCode(), this);
2023 if (!code->IsCode() || code->kind() != CodeKind::BUILTIN ||
2024 !code->has_handler_table() || !code->is_turbofanned()) {
2025 break;
2026 }
2027
2028 CatchType prediction = ToCatchType(code->GetBuiltinCatchPrediction());
2029 if (prediction != NOT_CAUGHT) return prediction;
2030 } break;
2031
2032 case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
2033 Handle<Code> code(frame->LookupCode(), this);
2034 CatchType prediction = ToCatchType(code->GetBuiltinCatchPrediction());
2035 if (prediction != NOT_CAUGHT) return prediction;
2036 } break;
2037
2038 default:
2039 // All other frame types cannot handle exceptions.
2040 break;
2041 }
2042 }
2043
2044 // Handler not found.
2045 return NOT_CAUGHT;
2046 }
2047
2048 Object Isolate::ThrowIllegalOperation() {
2049 if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
2050 return Throw(ReadOnlyRoots(heap()).illegal_access_string());
2051 }
2052
2053 void Isolate::ScheduleThrow(Object exception) {
2054 // When scheduling a throw we first throw the exception to trigger the
2055 // error reporting in case it is uncaught, before rescheduling it.
2056 Throw(exception);
2057 PropagatePendingExceptionToExternalTryCatch();
2058 if (has_pending_exception()) {
2059 thread_local_top()->scheduled_exception_ = pending_exception();
2060 thread_local_top()->external_caught_exception_ = false;
2061 clear_pending_exception();
2062 }
2063 }
2064
2065 void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
2066 DCHECK(handler == try_catch_handler());
2067 DCHECK(handler->HasCaught());
2068 DCHECK(handler->rethrow_);
2069 DCHECK(handler->capture_message_);
2070 Object message(reinterpret_cast<Address>(handler->message_obj_));
2071 DCHECK(message.IsJSMessageObject() || message.IsTheHole(this));
2072 thread_local_top()->pending_message_obj_ = message;
2073 }
2074
2075 void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
2076 DCHECK(has_scheduled_exception());
2077 if (reinterpret_cast<void*>(scheduled_exception().ptr()) ==
2078 handler->exception_) {
2079 DCHECK_NE(scheduled_exception(),
2080 ReadOnlyRoots(heap()).termination_exception());
2081 clear_scheduled_exception();
2082 } else {
2083 DCHECK_EQ(scheduled_exception(),
2084 ReadOnlyRoots(heap()).termination_exception());
2085 // Clear termination once we returned from all V8 frames.
2086 if (thread_local_top()->CallDepthIsZero()) {
2087 thread_local_top()->external_caught_exception_ = false;
2088 clear_scheduled_exception();
2089 }
2090 }
2091 if (reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr()) ==
2092 handler->message_obj_) {
2093 clear_pending_message();
2094 }
2095 }
2096
2097 Object Isolate::PromoteScheduledException() {
2098 Object thrown = scheduled_exception();
2099 clear_scheduled_exception();
2100 // Re-throw the exception to avoid getting repeated error reporting.
2101 return ReThrow(thrown);
2102 }
2103
2104 void Isolate::PrintCurrentStackTrace(FILE* out) {
2105 CaptureStackTraceOptions options;
2106 options.limit = 0;
2107 options.skip_mode = SKIP_NONE;
2108 options.capture_builtin_exit_frames = true;
2109 options.async_stack_trace = FLAG_async_stack_traces;
2110 options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
2111 options.capture_only_frames_subject_to_debugging = false;
2112
2113 Handle<FixedArray> frames = Handle<FixedArray>::cast(
2114 CaptureStackTrace(this, this->factory()->undefined_value(), options));
2115
2116 IncrementalStringBuilder builder(this);
2117 for (int i = 0; i < frames->length(); ++i) {
2118 Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this);
2119
2120 SerializeStackTraceFrame(this, frame, &builder);
2121 }
2122
2123 Handle<String> stack_trace = builder.Finish().ToHandleChecked();
2124 stack_trace->PrintOn(out);
2125 }
2126
2127 bool Isolate::ComputeLocation(MessageLocation* target) {
2128 StackTraceFrameIterator it(this);
2129 if (it.done()) return false;
2130 CommonFrame* frame = it.frame();
2131 // Compute the location from the function and the relocation info of the
2132 // baseline code. For optimized code this will use the deoptimization
2133 // information to get canonical location information.
2134 std::vector<FrameSummary> frames;
2135 wasm::WasmCodeRefScope code_ref_scope;
2136 frame->Summarize(&frames);
2137 FrameSummary& summary = frames.back();
2138 Handle<SharedFunctionInfo> shared;
2139 Handle<Object> script = summary.script();
2140 if (!script->IsScript() ||
2141 (Script::cast(*script).source().IsUndefined(this))) {
2142 return false;
2143 }
2144
2145 if (summary.IsJavaScript()) {
2146 shared = handle(summary.AsJavaScript().function()->shared(), this);
2147 }
2148 if (summary.AreSourcePositionsAvailable()) {
2149 int pos = summary.SourcePosition();
2150 *target =
2151 MessageLocation(Handle<Script>::cast(script), pos, pos + 1, shared);
2152 } else {
2153 *target = MessageLocation(Handle<Script>::cast(script), shared,
2154 summary.code_offset());
2155 }
2156 return true;
2157 }
2158
2159 bool Isolate::ComputeLocationFromException(MessageLocation* target,
2160 Handle<Object> exception) {
2161 if (!exception->IsJSObject()) return false;
2162
2163 Handle<Name> start_pos_symbol = factory()->error_start_pos_symbol();
2164 Handle<Object> start_pos = JSReceiver::GetDataProperty(
2165 Handle<JSObject>::cast(exception), start_pos_symbol);
2166 if (!start_pos->IsSmi()) return false;
2167 int start_pos_value = Handle<Smi>::cast(start_pos)->value();
2168
2169 Handle<Name> end_pos_symbol = factory()->error_end_pos_symbol();
2170 Handle<Object> end_pos = JSReceiver::GetDataProperty(
2171 Handle<JSObject>::cast(exception), end_pos_symbol);
2172 if (!end_pos->IsSmi()) return false;
2173 int end_pos_value = Handle<Smi>::cast(end_pos)->value();
2174
2175 Handle<Name> script_symbol = factory()->error_script_symbol();
2176 Handle<Object> script = JSReceiver::GetDataProperty(
2177 Handle<JSObject>::cast(exception), script_symbol);
2178 if (!script->IsScript()) return false;
2179
2180 Handle<Script> cast_script(Script::cast(*script), this);
2181 *target = MessageLocation(cast_script, start_pos_value, end_pos_value);
2182 return true;
2183 }
2184
2185 bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
2186 Handle<Object> exception) {
2187 if (!exception->IsJSObject()) return false;
2188 Handle<Name> key = factory()->stack_trace_symbol();
2189 Handle<Object> property =
2190 JSReceiver::GetDataProperty(Handle<JSObject>::cast(exception), key);
2191 if (!property->IsFixedArray()) return false;
2192
2193 Handle<FrameArray> elements =
2194 GetFrameArrayFromStackTrace(this, Handle<FixedArray>::cast(property));
2195
2196 const int frame_count = elements->FrameCount();
2197 for (int i = 0; i < frame_count; i++) {
2198 if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
2199 int func_index = elements->WasmFunctionIndex(i).value();
2200 int offset = elements->Offset(i).value();
2201 bool is_at_number_conversion =
2202 elements->IsAsmJsWasmFrame(i) &&
2203 elements->Flags(i).value() & FrameArray::kAsmJsAtNumberConversion;
2204 if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
2205 // WasmCode* held alive by the {GlobalWasmCodeRef}.
2206 wasm::WasmCode* code =
2207 Managed<wasm::GlobalWasmCodeRef>::cast(elements->WasmCodeObject(i))
2208 .get()
2209 ->code();
2210 offset = code->GetSourcePositionBefore(offset);
2211 }
2212 Handle<WasmInstanceObject> instance(elements->WasmInstance(i), this);
2213 const wasm::WasmModule* module = elements->WasmInstance(i).module();
2214 int pos = GetSourcePosition(module, func_index, offset,
2215 is_at_number_conversion);
2216 Handle<Script> script(instance->module_object().script(), this);
2217
2218 *target = MessageLocation(script, pos, pos + 1);
2219 return true;
2220 }
2221
2222 Handle<JSFunction> fun = handle(elements->Function(i), this);
2223 if (!fun->shared().IsSubjectToDebugging()) continue;
2224
2225 Object script = fun->shared().script();
2226 if (script.IsScript() &&
2227 !(Script::cast(script).source().IsUndefined(this))) {
2228 Handle<SharedFunctionInfo> shared = handle(fun->shared(), this);
2229
2230 AbstractCode abstract_code = elements->Code(i);
2231 const int code_offset = elements->Offset(i).value();
2232 Handle<Script> casted_script(Script::cast(script), this);
2233 if (shared->HasBytecodeArray() &&
2234 shared->GetBytecodeArray().HasSourcePositionTable()) {
2235 int pos = abstract_code.SourcePosition(code_offset);
2236 *target = MessageLocation(casted_script, pos, pos + 1, shared);
2237 } else {
2238 *target = MessageLocation(casted_script, shared, code_offset);
2239 }
2240
2241 return true;
2242 }
2243 }
2244 return false;
2245 }
2246
2247 Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
2248 MessageLocation* location) {
2249 Handle<FixedArray> stack_trace_object;
2250 if (capture_stack_trace_for_uncaught_exceptions_) {
2251 if (exception->IsJSError()) {
2252 // We fetch the stack trace that corresponds to this error object.
2253 // If the lookup fails, the exception is probably not a valid Error
2254 // object. In that case, we fall through and capture the stack trace
2255 // at this throw site.
2256 stack_trace_object =
2257 GetDetailedStackTrace(Handle<JSObject>::cast(exception));
2258 }
2259 if (stack_trace_object.is_null()) {
2260 // Not an error object, we capture stack and location at throw site.
2261 stack_trace_object = CaptureCurrentStackTrace(
2262 stack_trace_for_uncaught_exceptions_frame_limit_,
2263 stack_trace_for_uncaught_exceptions_options_);
2264 }
2265 }
2266 MessageLocation computed_location;
2267 if (location == nullptr &&
2268 (ComputeLocationFromException(&computed_location, exception) ||
2269 ComputeLocationFromStackTrace(&computed_location, exception) ||
2270 ComputeLocation(&computed_location))) {
2271 location = &computed_location;
2272 }
2273
2274 return MessageHandler::MakeMessageObject(
2275 this, MessageTemplate::kUncaughtException, location, exception,
2276 stack_trace_object);
2277 }
2278
2279 bool Isolate::IsJavaScriptHandlerOnTop(Object exception) {
2280 DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
2281
2282 // For uncatchable exceptions, the JavaScript handler cannot be on top.
2283 if (!is_catchable_by_javascript(exception)) return false;
2284
2285 // Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
2286 Address entry_handler = Isolate::handler(thread_local_top());
2287 if (entry_handler == kNullAddress) return false;
2288
2289 // Get the address of the external handler so we can compare the address to
2290 // determine which one is closer to the top of the stack.
2291 Address external_handler = thread_local_top()->try_catch_handler_address();
2292 if (external_handler == kNullAddress) return true;
2293
2294 // The exception has been externally caught if and only if there is an
2295 // external handler which is on top of the top-most JS_ENTRY handler.
2296 //
2297 // Note that finally clauses would re-throw an exception unless it's aborted
2298 // by jumps in control flow (like return, break, etc.), in which case we'll
2299 // have another chance to set a proper v8::TryCatch later.
2300 return (entry_handler < external_handler);
2301 }
2302
2303 bool Isolate::IsExternalHandlerOnTop(Object exception) {
2304 DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
2305
2306 // Get the address of the external handler so we can compare the address to
2307 // determine which one is closer to the top of the stack.
2308 Address external_handler = thread_local_top()->try_catch_handler_address();
2309 if (external_handler == kNullAddress) return false;
2310
2311 // For uncatchable exceptions, the external handler is always on top.
2312 if (!is_catchable_by_javascript(exception)) return true;
2313
2314 // Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
2315 Address entry_handler = Isolate::handler(thread_local_top());
2316 if (entry_handler == kNullAddress) return true;
2317
2318 // The exception has been externally caught if and only if there is an
2319 // external handler which is on top of the top-most JS_ENTRY handler.
2320 //
2321 // Note that finally clauses would re-throw an exception unless it's aborted
2322 // by jumps in control flow (like return, break, etc.), in which case we'll
2323 // have another chance to set a proper v8::TryCatch later.
2324 return (entry_handler > external_handler);
2325 }
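// Illustrative sketch of the address comparisons used by the two predicates
// above, assuming a downward-growing stack: a numerically smaller handler
// address was pushed later and is therefore closer to the top. Addresses
// are made up.
#if 0
#include <cstdint>

constexpr uintptr_t kEntryHandler = 0x7fff2000;     // older JS_ENTRY handler
constexpr uintptr_t kExternalHandler = 0x7fff1000;  // newer v8::TryCatch
static_assert(kExternalHandler < kEntryHandler,
              "the external handler sits on top");
// For these values IsJavaScriptHandlerOnTop() == (entry < external) == false
// and IsExternalHandlerOnTop() == (entry > external) == true.
#endif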
2326
2327 std::vector<MemoryRange>* Isolate::GetCodePages() const {
2328 return code_pages_.load(std::memory_order_acquire);
2329 }
2330
2331 void Isolate::SetCodePages(std::vector<MemoryRange>* new_code_pages) {
2332 code_pages_.store(new_code_pages, std::memory_order_release);
2333 }
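// Minimal sketch (hypothetical names, not V8 code) of the publication
// pattern used by SetCodePages()/GetCodePages(): the writer installs a fully
// constructed vector with release semantics and a reader, possibly a signal
// handler walking the stack, loads it with acquire semantics.
#if 0
#include <atomic>
#include <cstdint>
#include <vector>

struct Range { uintptr_t start; size_t length; };
std::atomic<std::vector<Range>*> g_pages{nullptr};

void Publish(std::vector<Range>* fresh) {
  g_pages.store(fresh, std::memory_order_release);
}
const std::vector<Range>* Snapshot() {
  return g_pages.load(std::memory_order_acquire);
}
#endif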
2334
2335 void Isolate::ReportPendingMessages() {
2336 DCHECK(AllowExceptions::IsAllowed(this));
2337
2338 // The embedder might run script in response to an exception.
2339 AllowJavascriptExecutionDebugOnly allow_script(this);
2340
2341 Object exception_obj = pending_exception();
2342
2343 // Try to propagate the exception to an external v8::TryCatch handler. If
2344 // propagation was unsuccessful, then we will get another chance at reporting
2345 // the pending message if the exception is re-thrown.
2346 bool has_been_propagated = PropagatePendingExceptionToExternalTryCatch();
2347 if (!has_been_propagated) return;
2348
2349 // Clear the pending message object early to avoid endless recursion.
2350 Object message_obj = thread_local_top()->pending_message_obj_;
2351 clear_pending_message();
2352
2353 // For uncatchable exceptions we do nothing. If needed, the exception and the
2354 // message have already been propagated to v8::TryCatch.
2355 if (!is_catchable_by_javascript(exception_obj)) return;
2356
2357 // Determine whether the message needs to be reported to all message handlers
2358 // depending on whether an external v8::TryCatch or an internal JavaScript
2359 // handler is on top.
2360 bool should_report_exception;
2361 if (IsExternalHandlerOnTop(exception_obj)) {
2362 // Only report the exception if the external handler is verbose.
2363 should_report_exception = try_catch_handler()->is_verbose_;
2364 } else {
2365 // Report the exception if it isn't caught by JavaScript code.
2366 should_report_exception = !IsJavaScriptHandlerOnTop(exception_obj);
2367 }
2368
2369 // Actually report the pending message to all message handlers.
2370 if (!message_obj.IsTheHole(this) && should_report_exception) {
2371 HandleScope scope(this);
2372 Handle<JSMessageObject> message(JSMessageObject::cast(message_obj), this);
2373 Handle<Object> exception(exception_obj, this);
2374 Handle<Script> script(message->script(), this);
2375 // Clear the exception and restore it afterwards, otherwise
2376 // CollectSourcePositions will abort.
2377 clear_pending_exception();
2378 JSMessageObject::EnsureSourcePositionsAvailable(this, message);
2379 set_pending_exception(*exception);
2380 int start_pos = message->GetStartPosition();
2381 int end_pos = message->GetEndPosition();
2382 MessageLocation location(script, start_pos, end_pos);
2383 MessageHandler::ReportMessage(this, &location, message);
2384 }
2385 }
2386
2387 bool Isolate::OptionalRescheduleException(bool clear_exception) {
2388 DCHECK(has_pending_exception());
2389 PropagatePendingExceptionToExternalTryCatch();
2390
2391 bool is_termination_exception =
2392 pending_exception() == ReadOnlyRoots(this).termination_exception();
2393
2394 if (is_termination_exception) {
2395 if (clear_exception) {
2396 thread_local_top()->external_caught_exception_ = false;
2397 clear_pending_exception();
2398 return false;
2399 }
2400 } else if (thread_local_top()->external_caught_exception_) {
2401 // If the exception is externally caught, clear it if there are no
2402 // JavaScript frames on the way to the C++ frame that has the
2403 // external handler.
2404 DCHECK_NE(thread_local_top()->try_catch_handler_address(), kNullAddress);
2405 Address external_handler_address =
2406 thread_local_top()->try_catch_handler_address();
2407 JavaScriptFrameIterator it(this);
2408 if (it.done() || (it.frame()->sp() > external_handler_address)) {
2409 clear_exception = true;
2410 }
2411 }
2412
2413 // Clear the exception if needed.
2414 if (clear_exception) {
2415 thread_local_top()->external_caught_exception_ = false;
2416 clear_pending_exception();
2417 return false;
2418 }
2419
2420 // Reschedule the exception.
2421 thread_local_top()->scheduled_exception_ = pending_exception();
2422 clear_pending_exception();
2423 return true;
2424 }
2425
2426 void Isolate::PushPromise(Handle<JSObject> promise) {
2427 ThreadLocalTop* tltop = thread_local_top();
2428 PromiseOnStack* prev = tltop->promise_on_stack_;
2429 Handle<JSObject> global_promise = global_handles()->Create(*promise);
2430 tltop->promise_on_stack_ = new PromiseOnStack(global_promise, prev);
2431 }
2432
2433 void Isolate::PopPromise() {
2434 ThreadLocalTop* tltop = thread_local_top();
2435 if (tltop->promise_on_stack_ == nullptr) return;
2436 PromiseOnStack* prev = tltop->promise_on_stack_->prev();
2437 Handle<Object> global_promise = tltop->promise_on_stack_->promise();
2438 delete tltop->promise_on_stack_;
2439 tltop->promise_on_stack_ = prev;
2440 global_handles()->Destroy(global_promise.location());
2441 }
2442
2443 namespace {
2444 bool PromiseIsRejectHandler(Isolate* isolate, Handle<JSReceiver> handler) {
2445 // Recurse to the forwarding Promise (e.g. return false) due to
2446 // - await reaction forwarding to the throwaway Promise, which has
2447 // a dependency edge to the outer Promise.
2448 // - PromiseIdResolveHandler forwarding to the output of .then
2449 // - Promise.all/Promise.race forwarding to a throwaway Promise, which
2450 // has a dependency edge to the generated outer Promise.
2451 // Otherwise, this is a real reject handler for the Promise.
2452 Handle<Symbol> key = isolate->factory()->promise_forwarding_handler_symbol();
2453 Handle<Object> forwarding_handler = JSReceiver::GetDataProperty(handler, key);
2454 return forwarding_handler->IsUndefined(isolate);
2455 }
2456
2457 bool PromiseHasUserDefinedRejectHandlerInternal(Isolate* isolate,
2458 Handle<JSPromise> promise) {
2459 Handle<Object> current(promise->reactions(), isolate);
2460 while (!current->IsSmi()) {
2461 Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(current);
2462 Handle<HeapObject> promise_or_capability(reaction->promise_or_capability(),
2463 isolate);
2464 if (!promise_or_capability->IsUndefined(isolate)) {
2465 if (!promise_or_capability->IsJSPromise()) {
2466 promise_or_capability = handle(
2467 Handle<PromiseCapability>::cast(promise_or_capability)->promise(),
2468 isolate);
2469 }
2470 Handle<JSPromise> promise =
2471 Handle<JSPromise>::cast(promise_or_capability);
2472 if (!reaction->reject_handler().IsUndefined(isolate)) {
2473 Handle<JSReceiver> reject_handler(
2474 JSReceiver::cast(reaction->reject_handler()), isolate);
2475 if (PromiseIsRejectHandler(isolate, reject_handler)) return true;
2476 }
2477 if (isolate->PromiseHasUserDefinedRejectHandler(promise)) return true;
2478 }
2479 current = handle(reaction->next(), isolate);
2480 }
2481 return false;
2482 }
2483
2484 } // namespace
2485
2486 bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<JSPromise> promise) {
2487 Handle<Symbol> key = factory()->promise_handled_by_symbol();
2488 std::stack<Handle<JSPromise>> promises;
2489 // First descend into the outermost promise and collect the stack of
2490 // Promises for reverse processing.
2491 while (true) {
2492 // If this promise was marked as being handled by a catch block
2493 // in an async function, then it has a user-defined reject handler.
2494 if (promise->handled_hint()) return true;
2495 if (promise->status() == Promise::kPending) {
2496 promises.push(promise);
2497 }
2498 Handle<Object> outer_promise_obj = JSObject::GetDataProperty(promise, key);
2499 if (!outer_promise_obj->IsJSPromise()) break;
2500 promise = Handle<JSPromise>::cast(outer_promise_obj);
2501 }
2502
2503 while (!promises.empty()) {
2504 promise = promises.top();
2505 if (PromiseHasUserDefinedRejectHandlerInternal(this, promise)) return true;
2506 promises.pop();
2507 }
2508 return false;
2509 }
2510
2511 Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
2512 Handle<Object> undefined = factory()->undefined_value();
2513 ThreadLocalTop* tltop = thread_local_top();
2514 if (tltop->promise_on_stack_ == nullptr) return undefined;
2515 // Find the top-most try-catch or try-finally handler.
2516 CatchType prediction = PredictExceptionCatcher();
2517 if (prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) {
2518 return undefined;
2519 }
2520 Handle<Object> retval = undefined;
2521 PromiseOnStack* promise_on_stack = tltop->promise_on_stack_;
2522 for (StackFrameIterator it(this); !it.done(); it.Advance()) {
2523 StackFrame* frame = it.frame();
2524 HandlerTable::CatchPrediction catch_prediction;
2525 if (frame->is_java_script()) {
2526 catch_prediction = PredictException(JavaScriptFrame::cast(frame));
2527 } else if (frame->type() == StackFrame::STUB) {
2528 Code code = frame->LookupCode();
2529 if (!code.IsCode() || code.kind() != CodeKind::BUILTIN ||
2530 !code.has_handler_table() || !code.is_turbofanned()) {
2531 continue;
2532 }
2533 catch_prediction = code.GetBuiltinCatchPrediction();
2534 } else {
2535 continue;
2536 }
2537
2538 switch (catch_prediction) {
2539 case HandlerTable::UNCAUGHT:
2540 continue;
2541 case HandlerTable::CAUGHT:
2542 case HandlerTable::DESUGARING:
2543 if (retval->IsJSPromise()) {
2544 // Caught the result of an inner async/await invocation.
2545 // Mark the inner promise as caught in the "synchronous case" so
2546 // that Debug::OnException will see it. In the synchronous case,
2547 // namely in the code in an async function before the first
2548 // await, the function which has this exception event has not yet
2549 // returned, so the generated Promise has not yet been marked
2550 // by AsyncFunctionAwaitCaught with promiseHandledHintSymbol.
2551 Handle<JSPromise>::cast(retval)->set_handled_hint(true);
2552 }
2553 return retval;
2554 case HandlerTable::PROMISE:
2555 return promise_on_stack
2556 ? Handle<Object>::cast(promise_on_stack->promise())
2557 : undefined;
2558 case HandlerTable::UNCAUGHT_ASYNC_AWAIT:
2559 case HandlerTable::ASYNC_AWAIT: {
2560 // While in the initial portion of async/await, continue the loop, popping
2561 // successive async/await stack frames until either an asynchronous frame
2562 // with dependents or a non-async frame is encountered. This handles the
2563 // synchronous async/await catch prediction case: assume that async
2564 // function calls are awaited.
2565 if (!promise_on_stack) return retval;
2566 retval = promise_on_stack->promise();
2567 if (retval->IsJSPromise()) {
2568 if (PromiseHasUserDefinedRejectHandler(
2569 Handle<JSPromise>::cast(retval))) {
2570 return retval;
2571 }
2572 }
2573 promise_on_stack = promise_on_stack->prev();
2574 continue;
2575 }
2576 }
2577 }
2578 return retval;
2579 }
2580
2581 void Isolate::SetCaptureStackTraceForUncaughtExceptions(
2582 bool capture, int frame_limit, StackTrace::StackTraceOptions options) {
2583 capture_stack_trace_for_uncaught_exceptions_ = capture;
2584 stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
2585 stack_trace_for_uncaught_exceptions_options_ = options;
2586 }
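// Hypothetical embedder call (an assumption of this edit) that flips the
// flags stored above: capture up to 10 detailed frames for every uncaught
// exception.
#if 0
#include "include/v8.h"

void ConfigureUncaughtExceptionTraces(v8::Isolate* isolate) {
  isolate->SetCaptureStackTraceForUncaughtExceptions(
      true, 10, v8::StackTrace::kDetailed);
}
#endif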
2587
2588 bool Isolate::get_capture_stack_trace_for_uncaught_exceptions() const {
2589 return capture_stack_trace_for_uncaught_exceptions_;
2590 }
2591
2592 void Isolate::SetAbortOnUncaughtExceptionCallback(
2593 v8::Isolate::AbortOnUncaughtExceptionCallback callback) {
2594 abort_on_uncaught_exception_callback_ = callback;
2595 }
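// Illustrative callback for the setter above (hypothetical, not V8 code):
// under --abort-on-uncaught-exception, CreateMessageOrAbort() aborts only if
// no callback is installed or the installed callback returns true.
#if 0
#include "include/v8.h"

bool ShouldAbortOnUncaughtException(v8::Isolate* isolate) {
  return true;  // let V8 abort; return false to suppress the abort
}
// isolate->SetAbortOnUncaughtExceptionCallback(&ShouldAbortOnUncaughtException);
#endif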
2596
2597 bool Isolate::AreWasmThreadsEnabled(Handle<Context> context) {
2598 if (wasm_threads_enabled_callback()) {
2599 v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
2600 return wasm_threads_enabled_callback()(api_context);
2601 }
2602 return FLAG_experimental_wasm_threads;
2603 }
2604
2605 bool Isolate::IsWasmSimdEnabled(Handle<Context> context) {
2606 if (wasm_simd_enabled_callback()) {
2607 v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
2608 return wasm_simd_enabled_callback()(api_context);
2609 }
2610 return FLAG_experimental_wasm_simd;
2611 }
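// Illustrative sketch of a per-context feature gate matching the callbacks
// queried above; the callback name and its policy are assumptions of the
// example.
#if 0
#include "include/v8.h"

bool AllowWasmThreads(v8::Local<v8::Context> context) {
  return false;  // e.g. consult embedder data attached to `context`
}
// isolate->SetWasmThreadsEnabledCallback(&AllowWasmThreads);
#endif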
2612
2613 Handle<Context> Isolate::GetIncumbentContext() {
2614 JavaScriptFrameIterator it(this);
2615
2616 // 1st candidate: most-recently-entered author function's context
2617 // if it's newer than the last Context::BackupIncumbentScope entry.
2618 //
2619 // NOTE: This code assumes that the stack grows downward.
2620 Address top_backup_incumbent =
2621 top_backup_incumbent_scope()
2622 ? top_backup_incumbent_scope()->JSStackComparableAddress()
2623 : 0;
2624 if (!it.done() &&
2625 (!top_backup_incumbent || it.frame()->sp() < top_backup_incumbent)) {
2626 Context context = Context::cast(it.frame()->context());
2627 return Handle<Context>(context.native_context(), this);
2628 }
2629
2630 // 2nd candidate: the last Context::Scope's incumbent context if any.
2631 if (top_backup_incumbent_scope()) {
2632 return Utils::OpenHandle(
2633 *top_backup_incumbent_scope()->backup_incumbent_context_);
2634 }
2635
2636 // Last candidate: the entered context or microtask context.
2637 // Given that no other author function is running, there can be no
2638 // cross-context function running either, so the incumbent realm must match
2639 // the entry realm.
2640 v8::Local<v8::Context> entered_context =
2641 reinterpret_cast<v8::Isolate*>(this)->GetEnteredOrMicrotaskContext();
2642 return Utils::OpenHandle(*entered_context);
2643 }
2644
2645 char* Isolate::ArchiveThread(char* to) {
2646 MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
2647 sizeof(ThreadLocalTop));
2648 return to + sizeof(ThreadLocalTop);
2649 }
2650
2651 char* Isolate::RestoreThread(char* from) {
2652 MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
2653 sizeof(ThreadLocalTop));
2654 DCHECK(context().is_null() || context().IsContext());
2655 return from + sizeof(ThreadLocalTop);
2656 }
2657
2658 void Isolate::ReleaseSharedPtrs() {
2659 base::MutexGuard lock(&managed_ptr_destructors_mutex_);
2660 while (managed_ptr_destructors_head_) {
2661 ManagedPtrDestructor* l = managed_ptr_destructors_head_;
2662 ManagedPtrDestructor* n = nullptr;
2663 managed_ptr_destructors_head_ = nullptr;
2664 for (; l != nullptr; l = n) {
2665 l->destructor_(l->shared_ptr_ptr_);
2666 n = l->next_;
2667 delete l;
2668 }
2669 }
2670 }
2671
2672 bool Isolate::IsBuiltinsTableHandleLocation(Address* handle_location) {
2673 FullObjectSlot location(handle_location);
2674 FullObjectSlot first_root(builtins_table());
2675 FullObjectSlot last_root(builtins_table() + Builtins::builtin_count);
2676 if (location >= last_root) return false;
2677 if (location < first_root) return false;
2678 return true;
2679 }
2680
2681 void Isolate::RegisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
2682 base::MutexGuard lock(&managed_ptr_destructors_mutex_);
2683 DCHECK_NULL(destructor->prev_);
2684 DCHECK_NULL(destructor->next_);
2685 if (managed_ptr_destructors_head_) {
2686 managed_ptr_destructors_head_->prev_ = destructor;
2687 }
2688 destructor->next_ = managed_ptr_destructors_head_;
2689 managed_ptr_destructors_head_ = destructor;
2690 }
2691
2692 void Isolate::UnregisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
2693 base::MutexGuard lock(&managed_ptr_destructors_mutex_);
2694 if (destructor->prev_) {
2695 destructor->prev_->next_ = destructor->next_;
2696 } else {
2697 DCHECK_EQ(destructor, managed_ptr_destructors_head_);
2698 managed_ptr_destructors_head_ = destructor->next_;
2699 }
2700 if (destructor->next_) destructor->next_->prev_ = destructor->prev_;
2701 destructor->prev_ = nullptr;
2702 destructor->next_ = nullptr;
2703 }
2704
2705 void Isolate::SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine) {
2706 DCHECK_NULL(wasm_engine_); // Only call once before {Init}.
2707 wasm_engine_ = std::move(engine);
2708 wasm_engine_->AddIsolate(this);
2709 }
2710
2711 // NOLINTNEXTLINE
2712 Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
2713 #if defined(USE_SIMULATOR)
2714 delete simulator_;
2715 #endif
2716 }
2717
2718 Isolate::PerIsolateThreadData* Isolate::ThreadDataTable::Lookup(
2719 ThreadId thread_id) {
2720 auto t = table_.find(thread_id);
2721 if (t == table_.end()) return nullptr;
2722 return t->second;
2723 }
2724
2725 void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
2726 bool inserted = table_.insert(std::make_pair(data->thread_id_, data)).second;
2727 CHECK(inserted);
2728 }
2729
2730 void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
2731 table_.erase(data->thread_id_);
2732 delete data;
2733 }
2734
2735 void Isolate::ThreadDataTable::RemoveAllThreads() {
2736 for (auto& x : table_) {
2737 delete x.second;
2738 }
2739 table_.clear();
2740 }
2741
2742 class TracingAccountingAllocator : public AccountingAllocator {
2743 public:
2744 explicit TracingAccountingAllocator(Isolate* isolate) : isolate_(isolate) {}
2745 ~TracingAccountingAllocator() = default;
2746
2747 protected:
2748 void TraceAllocateSegmentImpl(v8::internal::Segment* segment) override {
2749 base::MutexGuard lock(&mutex_);
2750 UpdateMemoryTrafficAndReportMemoryUsage(segment->total_size());
2751 }
2752
2753 void TraceZoneCreationImpl(const Zone* zone) override {
2754 base::MutexGuard lock(&mutex_);
2755 active_zones_.insert(zone);
2756 nesting_depth_++;
2757 }
2758
2759 void TraceZoneDestructionImpl(const Zone* zone) override {
2760 base::MutexGuard lock(&mutex_);
2761 #ifdef V8_ENABLE_PRECISE_ZONE_STATS
2762 if (FLAG_trace_zone_type_stats) {
2763 type_stats_.MergeWith(zone->type_stats());
2764 }
2765 #endif
2766 UpdateMemoryTrafficAndReportMemoryUsage(zone->segment_bytes_allocated());
2767 active_zones_.erase(zone);
2768 nesting_depth_--;
2769
2770 #ifdef V8_ENABLE_PRECISE_ZONE_STATS
2771 if (FLAG_trace_zone_type_stats && active_zones_.empty()) {
2772 type_stats_.Dump();
2773 }
2774 #endif
2775 }
2776
2777 private:
2778 void UpdateMemoryTrafficAndReportMemoryUsage(size_t memory_traffic_delta) {
2779 if (!FLAG_trace_zone_stats &&
2780 !(TracingFlags::zone_stats.load(std::memory_order_relaxed) &
2781 v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
// Don't print anything if zone tracing was enabled only because of
// FLAG_trace_zone_type_stats.
2784 return;
2785 }
2786
2787 memory_traffic_since_last_report_ += memory_traffic_delta;
2788 if (memory_traffic_since_last_report_ < FLAG_zone_stats_tolerance) return;
2789 memory_traffic_since_last_report_ = 0;
2790
2791 Dump(buffer_, true);
2792
2793 {
2794 std::string trace_str = buffer_.str();
2795
2796 if (FLAG_trace_zone_stats) {
2797 PrintF(
2798 "{"
2799 "\"type\": \"v8-zone-trace\", "
2800 "\"stats\": %s"
2801 "}\n",
2802 trace_str.c_str());
2803 }
2804 if (V8_UNLIKELY(
2805 TracingFlags::zone_stats.load(std::memory_order_relaxed) &
2806 v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
2807 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats"),
2808 "V8.Zone_Stats", TRACE_EVENT_SCOPE_THREAD, "stats",
2809 TRACE_STR_COPY(trace_str.c_str()));
2810 }
2811 }
2812
2813 // Clear the buffer.
2814 buffer_.str(std::string());
2815 }
2816
2817 void Dump(std::ostringstream& out, bool dump_details) {
// Note: Neither the isolate nor the zones are locked, so be careful with
// accesses, as the allocator is potentially used on a concurrent thread.
2820 double time = isolate_->time_millis_since_init();
2821 out << "{"
2822 << "\"isolate\": \"" << reinterpret_cast<void*>(isolate_) << "\", "
2823 << "\"time\": " << time << ", ";
2824 size_t total_segment_bytes_allocated = 0;
2825 size_t total_zone_allocation_size = 0;
2826 size_t total_zone_freed_size = 0;
2827
2828 if (dump_details) {
2829 // Print detailed zone stats if memory usage changes direction.
2830 out << "\"zones\": [";
2831 bool first = true;
2832 for (const Zone* zone : active_zones_) {
2833 size_t zone_segment_bytes_allocated = zone->segment_bytes_allocated();
2834 size_t zone_allocation_size = zone->allocation_size_for_tracing();
2835 size_t freed_size = zone->freed_size_for_tracing();
2836 if (first) {
2837 first = false;
2838 } else {
2839 out << ", ";
2840 }
2841 out << "{"
2842 << "\"name\": \"" << zone->name() << "\", "
2843 << "\"allocated\": " << zone_segment_bytes_allocated << ", "
2844 << "\"used\": " << zone_allocation_size << ", "
2845 << "\"freed\": " << freed_size << "}";
2846 total_segment_bytes_allocated += zone_segment_bytes_allocated;
2847 total_zone_allocation_size += zone_allocation_size;
2848 total_zone_freed_size += freed_size;
2849 }
2850 out << "], ";
2851 } else {
2852 // Just calculate total allocated/used memory values.
2853 for (const Zone* zone : active_zones_) {
2854 total_segment_bytes_allocated += zone->segment_bytes_allocated();
2855 total_zone_allocation_size += zone->allocation_size_for_tracing();
2856 total_zone_freed_size += zone->freed_size_for_tracing();
2857 }
2858 }
2859 out << "\"allocated\": " << total_segment_bytes_allocated << ", "
2860 << "\"used\": " << total_zone_allocation_size << ", "
2861 << "\"freed\": " << total_zone_freed_size << "}";
2862 }
2863
2864 Isolate* const isolate_;
2865 std::atomic<size_t> nesting_depth_{0};
2866
2867 base::Mutex mutex_;
2868 std::unordered_set<const Zone*> active_zones_;
2869 #ifdef V8_ENABLE_PRECISE_ZONE_STATS
2870 TypeStats type_stats_;
2871 #endif
2872 std::ostringstream buffer_;
2873 // This value is increased on both allocations and deallocations.
2874 size_t memory_traffic_since_last_report_ = 0;
2875 };
2876
2877 #ifdef DEBUG
2878 std::atomic<size_t> Isolate::non_disposed_isolates_;
2879 #endif // DEBUG
2880
2881 // static
2882 Isolate* Isolate::New() {
2883 // IsolateAllocator allocates the memory for the Isolate object according to
2884 // the given allocation mode.
2885 std::unique_ptr<IsolateAllocator> isolate_allocator =
2886 std::make_unique<IsolateAllocator>();
2887 // Construct Isolate object in the allocated memory.
2888 void* isolate_ptr = isolate_allocator->isolate_memory();
2889 Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
2890 #ifdef V8_COMPRESS_POINTERS
2891 DCHECK(IsAligned(isolate->isolate_root(), kPtrComprIsolateRootAlignment));
2892 #endif
2893
2894 #ifdef DEBUG
2895 non_disposed_isolates_++;
2896 #endif // DEBUG
2897
2898 return isolate;
2899 }
2900
2901 // static
2902 void Isolate::Delete(Isolate* isolate) {
2903 DCHECK_NOT_NULL(isolate);
2904 // Temporarily set this isolate as current so that various parts of
2905 // the isolate can access it in their destructors without having a
2906 // direct pointer. We don't use Enter/Exit here to avoid
2907 // initializing the thread data.
2908 PerIsolateThreadData* saved_data = isolate->CurrentPerIsolateThreadData();
2909 DCHECK_EQ(true, isolate_key_created_.load(std::memory_order_relaxed));
2910 Isolate* saved_isolate = reinterpret_cast<Isolate*>(
2911 base::Thread::GetThreadLocal(isolate->isolate_key_));
2912 SetIsolateThreadLocals(isolate, nullptr);
2913
2914 isolate->Deinit();
2915
2916 #ifdef DEBUG
2917 non_disposed_isolates_--;
2918 #endif // DEBUG
2919
// Take ownership of the IsolateAllocator to ensure the Isolate memory remains
// available during the Isolate destructor call.
2922 std::unique_ptr<IsolateAllocator> isolate_allocator =
2923 std::move(isolate->isolate_allocator_);
2924 isolate->~Isolate();
2925 // Now free the memory owned by the allocator.
2926 isolate_allocator.reset();
2927
2928 // Restore the previous current isolate.
2929 SetIsolateThreadLocals(saved_isolate, saved_data);
2930 }
2931
2932 void Isolate::SetUpFromReadOnlyArtifacts(
2933 std::shared_ptr<ReadOnlyArtifacts> artifacts, ReadOnlyHeap* ro_heap) {
2934 if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
2935 DCHECK_NOT_NULL(artifacts);
2936 artifacts_ = artifacts;
2937 } else {
2938 DCHECK_NULL(artifacts);
2939 }
2940 DCHECK_NOT_NULL(ro_heap);
2941 DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
2942 read_only_heap_ = ro_heap;
2943 heap_.SetUpFromReadOnlyHeap(read_only_heap_);
2944 }
2945
2946 v8::PageAllocator* Isolate::page_allocator() {
2947 return isolate_allocator_->page_allocator();
2948 }
2949
2950 Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
2951 : isolate_data_(this),
2952 isolate_allocator_(std::move(isolate_allocator)),
2953 id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
2954 allocator_(new TracingAccountingAllocator(this)),
2955 builtins_(this),
2956 #if defined(DEBUG) || defined(VERIFY_HEAP)
2957 num_active_deserializers_(0),
2958 #endif
2959 rail_mode_(PERFORMANCE_ANIMATION),
2960 code_event_dispatcher_(new CodeEventDispatcher()),
2961 persistent_handles_list_(new PersistentHandlesList()),
2962 jitless_(FLAG_jitless),
2963 #if V8_SFI_HAS_UNIQUE_ID
2964 next_unique_sfi_id_(0),
2965 #endif
2966 cancelable_task_manager_(new CancelableTaskManager()) {
2967 TRACE_ISOLATE(constructor);
2968 CheckIsolateLayout();
2969
2970 // ThreadManager is initialized early to support locking an isolate
2971 // before it is entered.
2972 thread_manager_ = new ThreadManager(this);
2973
2974 handle_scope_data_.Initialize();
2975
2976 #define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
2977 name##_ = (initial_value);
2978 ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
2979 #undef ISOLATE_INIT_EXECUTE
2980
2981 #define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length) \
2982 memset(name##_, 0, sizeof(type) * length);
2983 ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
2984 #undef ISOLATE_INIT_ARRAY_EXECUTE
2985
2986 InitializeLoggingAndCounters();
2987 debug_ = new Debug(this);
2988
2989 InitializeDefaultEmbeddedBlob();
2990
2991 MicrotaskQueue::SetUpDefaultMicrotaskQueue(this);
2992 }
2993
2994 void Isolate::CheckIsolateLayout() {
2995 CHECK_EQ(OFFSET_OF(Isolate, isolate_data_), 0);
2996 CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
2997 Internals::kIsolateEmbedderDataOffset);
2998 CHECK_EQ(static_cast<int>(
2999 OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_fp_)),
3000 Internals::kIsolateFastCCallCallerFpOffset);
3001 CHECK_EQ(static_cast<int>(
3002 OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_pc_)),
3003 Internals::kIsolateFastCCallCallerPcOffset);
3004 CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
3005 Internals::kIsolateStackGuardOffset);
3006 CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
3007 Internals::kIsolateRootsOffset);
3008
3009 #ifdef V8_HEAP_SANDBOX
3010 CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, buffer_)),
3011 Internals::kExternalPointerTableBufferOffset);
3012 CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, length_)),
3013 Internals::kExternalPointerTableLengthOffset);
3014 CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, capacity_)),
3015 Internals::kExternalPointerTableCapacityOffset);
3016 #endif
3017 }
3018
3019 void Isolate::ClearSerializerData() {
3020 delete external_reference_map_;
3021 external_reference_map_ = nullptr;
3022 }
3023
3024 bool Isolate::LogObjectRelocation() {
3025 return FLAG_verify_predictable || logger()->is_logging() || is_profiling() ||
3026 heap()->isolate()->logger()->is_listening_to_code_events() ||
3027 (heap_profiler() != nullptr &&
3028 heap_profiler()->is_tracking_object_moves()) ||
3029 heap()->has_heap_object_allocation_tracker();
3030 }
3031
3032 void Isolate::Deinit() {
3033 TRACE_ISOLATE(deinit);
3034
3035 tracing_cpu_profiler_.reset();
3036 if (FLAG_stress_sampling_allocation_profiler > 0) {
3037 heap_profiler()->StopSamplingHeapProfiler();
3038 }
3039
3040 metrics_recorder_->NotifyIsolateDisposal();
3041 recorder_context_id_map_.clear();
3042
3043 #if defined(V8_OS_WIN64)
3044 if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
3045 heap()->memory_allocator() && RequiresCodeRange()) {
3046 const base::AddressRegion& code_range =
3047 heap()->memory_allocator()->code_range();
3048 void* start = reinterpret_cast<void*>(code_range.begin());
3049 win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
3050 }
3051 #endif // V8_OS_WIN64
3052
3053 FutexEmulation::IsolateDeinit(this);
3054
3055 debug()->Unload();
3056
3057 wasm_engine()->DeleteCompileJobsOnIsolate(this);
3058
3059 if (concurrent_recompilation_enabled()) {
3060 optimizing_compile_dispatcher_->Stop();
3061 delete optimizing_compile_dispatcher_;
3062 optimizing_compile_dispatcher_ = nullptr;
3063 }
3064
3065 BackingStore::RemoveSharedWasmMemoryObjects(this);
3066
3067 // Help sweeper threads complete sweeping to stop faster.
3068 heap_.mark_compact_collector()->DrainSweepingWorklists();
3069 heap_.mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
3070
3071 heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
3072
3073 DumpAndResetStats();
3074
3075 if (FLAG_print_deopt_stress) {
3076 PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
3077 }
3078
3079 // We must stop the logger before we tear down other components.
3080 sampler::Sampler* sampler = logger_->sampler();
3081 if (sampler && sampler->IsActive()) sampler->Stop();
3082
3083 FreeThreadResources();
3084 logger_->StopProfilerThread();
3085
3086 // We start with the heap tear down so that releasing managed objects does
3087 // not cause a GC.
3088 heap_.StartTearDown();
3089
3090 ReleaseSharedPtrs();
3091
3092 string_table_.reset();
3093 builtins_.TearDown();
3094 bootstrapper_->TearDown();
3095
3096 if (runtime_profiler_ != nullptr) {
3097 delete runtime_profiler_;
3098 runtime_profiler_ = nullptr;
3099 }
3100
3101 delete heap_profiler_;
3102 heap_profiler_ = nullptr;
3103
3104 compiler_dispatcher_->AbortAll();
3105 delete compiler_dispatcher_;
3106 compiler_dispatcher_ = nullptr;
3107
// This stops cancelable tasks (e.g. concurrent marking tasks).
3109 cancelable_task_manager()->CancelAndWait();
3110
3111 heap_.TearDown();
3112 FILE* logfile = logger_->TearDownAndGetLogFile();
3113 if (logfile != nullptr) fclose(logfile);
3114
3115 if (wasm_engine_) {
3116 wasm_engine_->RemoveIsolate(this);
3117 wasm_engine_.reset();
3118 }
3119
3120 TearDownEmbeddedBlob();
3121
3122 delete interpreter_;
3123 interpreter_ = nullptr;
3124
3125 delete ast_string_constants_;
3126 ast_string_constants_ = nullptr;
3127
3128 code_event_dispatcher_.reset();
3129
3130 delete root_index_map_;
3131 root_index_map_ = nullptr;
3132
3133 delete compiler_zone_;
3134 compiler_zone_ = nullptr;
3135 compiler_cache_ = nullptr;
3136
3137 SetCodePages(nullptr);
3138
3139 ClearSerializerData();
3140
3141 {
3142 base::MutexGuard lock_guard(&thread_data_table_mutex_);
3143 thread_data_table_.RemoveAllThreads();
3144 }
3145 }
3146
3147 void Isolate::SetIsolateThreadLocals(Isolate* isolate,
3148 PerIsolateThreadData* data) {
3149 base::Thread::SetThreadLocal(isolate_key_, isolate);
3150 base::Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
3151 }
3152
3153 Isolate::~Isolate() {
3154 TRACE_ISOLATE(destructor);
3155
3156 // The entry stack must be empty when we get here.
3157 DCHECK(entry_stack_ == nullptr || entry_stack_->previous_item == nullptr);
3158
3159 delete entry_stack_;
3160 entry_stack_ = nullptr;
3161
3162 delete date_cache_;
3163 date_cache_ = nullptr;
3164
3165 delete regexp_stack_;
3166 regexp_stack_ = nullptr;
3167
3168 delete descriptor_lookup_cache_;
3169 descriptor_lookup_cache_ = nullptr;
3170
3171 delete load_stub_cache_;
3172 load_stub_cache_ = nullptr;
3173 delete store_stub_cache_;
3174 store_stub_cache_ = nullptr;
3175
3176 delete materialized_object_store_;
3177 materialized_object_store_ = nullptr;
3178
3179 delete logger_;
3180 logger_ = nullptr;
3181
3182 delete handle_scope_implementer_;
3183 handle_scope_implementer_ = nullptr;
3184
3185 delete code_tracer();
3186 set_code_tracer(nullptr);
3187
3188 delete compilation_cache_;
3189 compilation_cache_ = nullptr;
3190 delete bootstrapper_;
3191 bootstrapper_ = nullptr;
3192 delete inner_pointer_to_code_cache_;
3193 inner_pointer_to_code_cache_ = nullptr;
3194
3195 delete thread_manager_;
3196 thread_manager_ = nullptr;
3197
3198 delete global_handles_;
3199 global_handles_ = nullptr;
3200 delete eternal_handles_;
3201 eternal_handles_ = nullptr;
3202
3203 delete string_stream_debug_object_cache_;
3204 string_stream_debug_object_cache_ = nullptr;
3205
3206 delete random_number_generator_;
3207 random_number_generator_ = nullptr;
3208
3209 delete fuzzer_rng_;
3210 fuzzer_rng_ = nullptr;
3211
3212 delete debug_;
3213 debug_ = nullptr;
3214
3215 delete cancelable_task_manager_;
3216 cancelable_task_manager_ = nullptr;
3217
3218 delete allocator_;
3219 allocator_ = nullptr;
3220
3221 // Assert that |default_microtask_queue_| is the last MicrotaskQueue instance.
3222 DCHECK_IMPLIES(default_microtask_queue_,
3223 default_microtask_queue_ == default_microtask_queue_->next());
3224 delete default_microtask_queue_;
3225 default_microtask_queue_ = nullptr;
3226
3227 // The ReadOnlyHeap should not be destroyed when sharing without pointer
3228 // compression as the object itself is shared.
3229 if (read_only_heap_->IsOwnedByIsolate()) {
3230 delete read_only_heap_;
3231 read_only_heap_ = nullptr;
3232 }
3233 }
3234
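// Initializes the current thread's ThreadLocalTop and clears any pending or
// scheduled exceptions and messages.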
3235 void Isolate::InitializeThreadLocal() {
3236 thread_local_top()->Initialize(this);
3237 clear_pending_exception();
3238 clear_pending_message();
3239 clear_scheduled_exception();
3240 }
3241
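// Marks the innermost external v8::TryCatch, if any, as terminated and hands
// it the null value as its exception.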
3242 void Isolate::SetTerminationOnExternalTryCatch() {
3243 if (try_catch_handler() == nullptr) return;
3244 try_catch_handler()->can_continue_ = false;
3245 try_catch_handler()->has_terminated_ = true;
3246 try_catch_handler()->exception_ =
3247 reinterpret_cast<void*>(ReadOnlyRoots(heap()).null_value().ptr());
3248 }
3249
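// Returns false when the pending exception will be handled by JavaScript.
// Otherwise returns true, recording the exception (and the pending message,
// if any) in the innermost external v8::TryCatch when one is on top;
// uncatchable exceptions mark that handler as terminated instead.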
3250 bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
3251 Object exception = pending_exception();
3252
3253 if (IsJavaScriptHandlerOnTop(exception)) {
3254 thread_local_top()->external_caught_exception_ = false;
3255 return false;
3256 }
3257
3258 if (!IsExternalHandlerOnTop(exception)) {
3259 thread_local_top()->external_caught_exception_ = false;
3260 return true;
3261 }
3262
3263 thread_local_top()->external_caught_exception_ = true;
3264 if (!is_catchable_by_javascript(exception)) {
3265 SetTerminationOnExternalTryCatch();
3266 } else {
3267 v8::TryCatch* handler = try_catch_handler();
3268 DCHECK(thread_local_top()->pending_message_obj_.IsJSMessageObject() ||
3269 thread_local_top()->pending_message_obj_.IsTheHole(this));
3270 handler->can_continue_ = true;
3271 handler->has_terminated_ = false;
3272 handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
3273 // Propagate to the external try-catch only if we got an actual message.
3274 if (thread_local_top()->pending_message_obj_.IsTheHole(this)) return true;
3275
3276 handler->message_obj_ =
3277 reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr());
3278 }
3279 return true;
3280 }
3281
3282 bool Isolate::InitializeCounters() {
3283 if (async_counters_) return false;
3284 async_counters_ = std::make_shared<Counters>(this);
3285 return true;
3286 }
3287
3288 void Isolate::InitializeLoggingAndCounters() {
3289 if (logger_ == nullptr) {
3290 logger_ = new Logger(this);
3291 }
3292 InitializeCounters();
3293 }
3294
3295 namespace {
3296
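// Replaces each on-heap builtin Code object with a trampoline whose target is
// the corresponding instruction stream in the embedded blob, making the old
// on-heap code objects unreachable.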
3297 void CreateOffHeapTrampolines(Isolate* isolate) {
3298 DCHECK_NOT_NULL(isolate->embedded_blob_code());
3299 DCHECK_NE(0, isolate->embedded_blob_code_size());
3300 DCHECK_NOT_NULL(isolate->embedded_blob_data());
3301 DCHECK_NE(0, isolate->embedded_blob_data_size());
3302
3303 HandleScope scope(isolate);
3304 Builtins* builtins = isolate->builtins();
3305
3306 EmbeddedData d = EmbeddedData::FromBlob();
3307
3308 STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
3309 for (int i = 0; i < Builtins::builtin_count; i++) {
3310 Address instruction_start = d.InstructionStartOfBuiltin(i);
3311 Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
3312 builtins->builtin_handle(i), instruction_start);
3313
3314 // From this point onwards, the old builtin code object is unreachable and
3315 // will be collected by the next GC.
3316 builtins->set_builtin(i, *trampoline);
3317 }
3318 }
3319
3320 #ifdef DEBUG
3321 bool IsolateIsCompatibleWithEmbeddedBlob(Isolate* isolate) {
3322 EmbeddedData d = EmbeddedData::FromBlob(isolate);
3323 return (d.IsolateHash() == isolate->HashIsolateForEmbeddedBlob());
3324 }
3325 #endif // DEBUG
3326
3327 } // namespace
3328
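// Points this isolate at an embedded blob: the sticky blob if one has been
// set (taking a reference under the refcount mutex), otherwise the blob
// compiled into the binary.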
3329 void Isolate::InitializeDefaultEmbeddedBlob() {
3330 const uint8_t* code = DefaultEmbeddedBlobCode();
3331 uint32_t code_size = DefaultEmbeddedBlobCodeSize();
3332 const uint8_t* data = DefaultEmbeddedBlobData();
3333 uint32_t data_size = DefaultEmbeddedBlobDataSize();
3334
3335 #ifdef V8_MULTI_SNAPSHOTS
3336 if (!FLAG_untrusted_code_mitigations) {
3337 code = TrustedEmbeddedBlobCode();
3338 code_size = TrustedEmbeddedBlobCodeSize();
3339 data = TrustedEmbeddedBlobData();
3340 data_size = TrustedEmbeddedBlobDataSize();
3341 }
3342 #endif
3343
3344 if (StickyEmbeddedBlobCode() != nullptr) {
3345 base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
3346 // Check again now that we hold the lock.
3347 if (StickyEmbeddedBlobCode() != nullptr) {
3348 code = StickyEmbeddedBlobCode();
3349 code_size = StickyEmbeddedBlobCodeSize();
3350 data = StickyEmbeddedBlobData();
3351 data_size = StickyEmbeddedBlobDataSize();
3352 current_embedded_blob_refs_++;
3353 }
3354 }
3355
3356 if (code == nullptr) {
3357 CHECK_EQ(0, code_size);
3358 } else {
3359 SetEmbeddedBlob(code, code_size, data, data_size);
3360 }
3361 }
3362
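// Reuses the sticky embedded blob if one exists; otherwise serializes the
// current builtins into a fresh off-heap instruction stream and installs it
// as both the current and the sticky blob. Either way, the on-heap builtins
// are then replaced by off-heap trampolines.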
3363 void Isolate::CreateAndSetEmbeddedBlob() {
3364 base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
3365
3366 PrepareBuiltinSourcePositionMap();
3367
3368 PrepareBuiltinLabelInfoMap();
3369
3370 // If a sticky blob has been set, we reuse it.
3371 if (StickyEmbeddedBlobCode() != nullptr) {
3372 CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
3373 CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
3374 CHECK_EQ(CurrentEmbeddedBlobCode(), StickyEmbeddedBlobCode());
3375 CHECK_EQ(CurrentEmbeddedBlobData(), StickyEmbeddedBlobData());
3376 } else {
3377 // Create and set a new embedded blob.
3378 uint8_t* code;
3379 uint32_t code_size;
3380 uint8_t* data;
3381 uint32_t data_size;
3382 InstructionStream::CreateOffHeapInstructionStream(this, &code, &code_size,
3383 &data, &data_size);
3384
3385 CHECK_EQ(0, current_embedded_blob_refs_);
3386 const uint8_t* const_code = const_cast<const uint8_t*>(code);
3387 const uint8_t* const_data = const_cast<const uint8_t*>(data);
3388 SetEmbeddedBlob(const_code, code_size, const_data, data_size);
3389 current_embedded_blob_refs_++;
3390
3391 SetStickyEmbeddedBlob(code, code_size, data, data_size);
3392 }
3393
3394 CreateOffHeapTrampolines(this);
3395 }
3396
3397 void Isolate::TearDownEmbeddedBlob() {
// Nothing to do if the blob is embedded into the binary or was never set.
3399 if (StickyEmbeddedBlobCode() == nullptr) return;
3400
3401 CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
3402 CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
3403 CHECK_EQ(CurrentEmbeddedBlobCode(), StickyEmbeddedBlobCode());
3404 CHECK_EQ(CurrentEmbeddedBlobData(), StickyEmbeddedBlobData());
3405
3406 base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
3407 current_embedded_blob_refs_--;
3408 if (current_embedded_blob_refs_ == 0 && enable_embedded_blob_refcounting_) {
3409 // We own the embedded blob and are the last holder. Free it.
3410 InstructionStream::FreeOffHeapInstructionStream(
3411 const_cast<uint8_t*>(embedded_blob_code()), embedded_blob_code_size(),
3412 const_cast<uint8_t*>(embedded_blob_data()), embedded_blob_data_size());
3413 ClearEmbeddedBlob();
3414 }
3415 }
3416
3417 bool Isolate::InitWithoutSnapshot() { return Init(nullptr, nullptr, false); }
3418
3419 bool Isolate::InitWithSnapshot(SnapshotData* startup_snapshot_data,
3420 SnapshotData* read_only_snapshot_data,
3421 bool can_rehash) {
3422 DCHECK_NOT_NULL(startup_snapshot_data);
3423 DCHECK_NOT_NULL(read_only_snapshot_data);
3424 return Init(startup_snapshot_data, read_only_snapshot_data, can_rehash);
3425 }
3426
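// Formats {address} as a "0x"-prefixed hexadecimal string.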
3427 static std::string AddressToString(uintptr_t address) {
3428 std::stringstream stream_address;
3429 stream_address << "0x" << std::hex << address;
3430 return stream_address.str();
3431 }
3432
3433 void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
3434 DCHECK_NOT_NULL(add_crash_key_callback_);
3435
3436 const uintptr_t isolate_address = reinterpret_cast<uintptr_t>(this);
3437 add_crash_key_callback_(v8::CrashKeyId::kIsolateAddress,
3438 AddressToString(isolate_address));
3439
3440 const uintptr_t ro_space_firstpage_address =
3441 heap()->read_only_space()->FirstPageAddress();
3442 add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress,
3443 AddressToString(ro_space_firstpage_address));
3444 const uintptr_t map_space_firstpage_address =
3445 heap()->map_space()->FirstPageAddress();
3446 add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
3447 AddressToString(map_space_firstpage_address));
3448 const uintptr_t code_space_firstpage_address =
3449 heap()->code_space()->FirstPageAddress();
3450 add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
3451 AddressToString(code_space_firstpage_address));
3452 }
3453
3454 void Isolate::InitializeCodeRanges() {
3455 DCHECK_NULL(GetCodePages());
3456 MemoryRange embedded_range{
3457 reinterpret_cast<const void*>(embedded_blob_code()),
3458 embedded_blob_code_size()};
3459 code_pages_buffer1_.push_back(embedded_range);
3460 SetCodePages(&code_pages_buffer1_);
3461 }
3462
3463 namespace {
3464
// This global map tracks the number of stack loads/stores per optimized/wasm
// function.
3467 using MapOfLoadsAndStoresPerFunction =
3468 std::map<std::string /* function_name */,
3469 std::pair<uint64_t /* loads */, uint64_t /* stores */>>;
3470 MapOfLoadsAndStoresPerFunction* stack_access_count_map = nullptr;
3471 } // namespace
3472
3473 bool Isolate::Init(SnapshotData* startup_snapshot_data,
3474 SnapshotData* read_only_snapshot_data, bool can_rehash) {
3475 TRACE_ISOLATE(init);
3476 const bool create_heap_objects = (read_only_snapshot_data == nullptr);
3477 // We either have both or neither.
3478 DCHECK_EQ(create_heap_objects, startup_snapshot_data == nullptr);
3479
3480 base::ElapsedTimer timer;
3481 if (create_heap_objects && FLAG_profile_deserialization) timer.Start();
3482
3483 time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
3484
3485 stress_deopt_count_ = FLAG_deopt_every_n_times;
3486 force_slow_path_ = FLAG_force_slow_path;
3487
3488 has_fatal_error_ = false;
3489
3490 // The initialization process does not handle memory exhaustion.
3491 AlwaysAllocateScope always_allocate(heap());
3492
3493 #define ASSIGN_ELEMENT(CamelName, hacker_name) \
3494 isolate_addresses_[IsolateAddressId::k##CamelName##Address] = \
3495 reinterpret_cast<Address>(hacker_name##_address());
3496 FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
3497 #undef ASSIGN_ELEMENT
3498
3499 // We need to initialize code_pages_ before any on-heap code is allocated to
3500 // make sure we record all code allocations.
3501 InitializeCodeRanges();
3502
3503 compilation_cache_ = new CompilationCache(this);
3504 descriptor_lookup_cache_ = new DescriptorLookupCache();
3505 inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
3506 global_handles_ = new GlobalHandles(this);
3507 eternal_handles_ = new EternalHandles();
3508 bootstrapper_ = new Bootstrapper(this);
3509 handle_scope_implementer_ = new HandleScopeImplementer(this);
3510 load_stub_cache_ = new StubCache(this);
3511 store_stub_cache_ = new StubCache(this);
3512 materialized_object_store_ = new MaterializedObjectStore(this);
3513 regexp_stack_ = new RegExpStack();
3514 regexp_stack_->isolate_ = this;
3515 date_cache_ = new DateCache();
3516 heap_profiler_ = new HeapProfiler(heap());
3517 interpreter_ = new interpreter::Interpreter(this);
3518 string_table_.reset(new StringTable(this));
3519
3520 compiler_dispatcher_ =
3521 new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
3522
// Enable logging before setting up the heap.
3524 logger_->SetUp(this);
3525
3526 metrics_recorder_ = std::make_shared<metrics::Recorder>();
3527
3528 { // NOLINT
3529 // Ensure that the thread has a valid stack guard. The v8::Locker object
3530 // will ensure this too, but we don't have to use lockers if we are only
3531 // using one thread.
3532 ExecutionAccess lock(this);
3533 stack_guard()->InitThread(lock);
3534 }
3535
// Set up the object heap.
3537 DCHECK(!heap_.HasBeenSetUp());
3538 heap_.SetUp();
3539 ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
3540 heap_.SetUpSpaces();
3541
3542 isolate_data_.external_reference_table()->Init(this);
3543
// Set up the wasm engine.
3545 if (wasm_engine_ == nullptr) {
3546 SetWasmEngine(wasm::WasmEngine::GetWasmEngine());
3547 }
3548 DCHECK_NOT_NULL(wasm_engine_);
3549
3550 if (setup_delegate_ == nullptr) {
3551 setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
3552 }
3553
3554 if (!FLAG_inline_new) heap_.DisableInlineAllocation();
3555
3556 if (!setup_delegate_->SetupHeap(&heap_)) {
3557 V8::FatalProcessOutOfMemory(this, "heap object creation");
3558 return false;
3559 }
3560
3561 if (create_heap_objects) {
3562 // Terminate the startup object cache so we can iterate.
3563 startup_object_cache_.push_back(ReadOnlyRoots(this).undefined_value());
3564 }
3565
3566 InitializeThreadLocal();
3567
// The profiler has to be created after ThreadLocal is initialized because it
// makes use of interrupts.
3570 tracing_cpu_profiler_.reset(new TracingCpuProfilerImpl(this));
3571
3572 bootstrapper_->Initialize(create_heap_objects);
3573
3574 if (create_heap_objects) {
3575 builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
3576
3577 setup_delegate_->SetupBuiltins(this);
3578
3579 #ifndef V8_TARGET_ARCH_ARM
3580 // Store the interpreter entry trampoline on the root list. It is used as a
3581 // template for further copies that may later be created to help profile
3582 // interpreted code.
3583 // We currently cannot do this on arm due to RELATIVE_CODE_TARGETs
3584 // assuming that all possible Code targets may be addressed with an int24
3585 // offset, effectively limiting code space size to 32MB. We can guarantee
3586 // this at mksnapshot-time, but not at runtime.
3587 // See also: https://crbug.com/v8/8713.
3588 heap_.SetInterpreterEntryTrampolineForProfiling(
3589 heap_.builtin(Builtins::kInterpreterEntryTrampoline));
3590 #endif
3591
3592 builtins_constants_table_builder_->Finalize();
3593 delete builtins_constants_table_builder_;
3594 builtins_constants_table_builder_ = nullptr;
3595
3596 CreateAndSetEmbeddedBlob();
3597 } else {
3598 setup_delegate_->SetupBuiltins(this);
3599 }
3600
3601 // Initialize custom memcopy and memmove functions (must happen after
3602 // embedded blob setup).
3603 init_memcopy_functions();
3604
3605 if (FLAG_log_internal_timer_events) {
3606 set_event_logger(Logger::DefaultEventLoggerSentinel);
3607 }
3608
3609 if (FLAG_trace_turbo || FLAG_trace_turbo_graph || FLAG_turbo_profiling) {
3610 PrintF("Concurrent recompilation has been disabled for tracing.\n");
3611 } else if (OptimizingCompileDispatcher::Enabled()) {
3612 optimizing_compile_dispatcher_ = new OptimizingCompileDispatcher(this);
3613 }
3614
// Initialize the runtime profiler before deserialization, because garbage
// collections may occur, clearing/updating ICs.
3617 runtime_profiler_ = new RuntimeProfiler(this);
3618
3619 // If we are deserializing, read the state into the now-empty heap.
3620 {
3621 AlwaysAllocateScope always_allocate(heap());
3622 CodeSpaceMemoryModificationScope modification_scope(heap());
3623
3624 if (create_heap_objects) {
3625 heap_.read_only_space()->ClearStringPaddingIfNeeded();
3626 read_only_heap_->OnCreateHeapObjectsComplete(this);
3627 } else {
3628 StartupDeserializer startup_deserializer(this, startup_snapshot_data,
3629 can_rehash);
3630 startup_deserializer.DeserializeIntoIsolate();
3631 }
3632 load_stub_cache_->Initialize();
3633 store_stub_cache_->Initialize();
3634 interpreter_->Initialize();
3635 heap_.NotifyDeserializationComplete();
3636 }
3637
3638 #ifdef VERIFY_HEAP
3639 if (FLAG_verify_heap) {
3640 heap_.VerifyReadOnlyHeap();
3641 }
3642 #endif
3643
3644 delete setup_delegate_;
3645 setup_delegate_ = nullptr;
3646
3647 Builtins::InitializeBuiltinEntryTable(this);
3648 Builtins::EmitCodeCreateEvents(this);
3649
3650 #ifdef DEBUG
3651 // Verify that the current heap state (usually deserialized from the snapshot)
3652 // is compatible with the embedded blob. If this DCHECK fails, we've likely
3653 // loaded a snapshot generated by a different V8 version or build-time
3654 // configuration.
3655 if (!IsolateIsCompatibleWithEmbeddedBlob(this)) {
3656 FATAL(
3657 "The Isolate is incompatible with the embedded blob. This is usually "
3658 "caused by incorrect usage of mksnapshot. When generating custom "
3659 "snapshots, embedders must ensure they pass the same flags as during "
3660 "the V8 build process (e.g.: --turbo-instruction-scheduling).");
3661 }
3662 #endif // DEBUG
3663
3664 #ifndef V8_TARGET_ARCH_ARM
// The interpreter entry trampoline (IET) for profiling should always be a
// full on-heap Code object.
3666 DCHECK(!Code::cast(heap_.interpreter_entry_trampoline_for_profiling())
3667 .is_off_heap_trampoline());
3668 #endif // V8_TARGET_ARCH_ARM
3669
3670 if (FLAG_print_builtin_code) builtins()->PrintBuiltinCode();
3671 if (FLAG_print_builtin_size) builtins()->PrintBuiltinSize();
3672
3673 // Finish initialization of ThreadLocal after deserialization is done.
3674 clear_pending_exception();
3675 clear_pending_message();
3676 clear_scheduled_exception();
3677
// Quiet the heap NaN if needed on the target platform.
3679 if (!create_heap_objects)
3680 Assembler::QuietNaN(ReadOnlyRoots(this).nan_value());
3681
3682 if (FLAG_trace_turbo) {
3683 // Create an empty file.
3684 std::ofstream(GetTurboCfgFileName(this).c_str(), std::ios_base::trunc);
3685 }
3686
3687 {
3688 HandleScope scope(this);
3689 ast_string_constants_ = new AstStringConstants(this, HashSeed(this));
3690 }
3691
3692 initialized_from_snapshot_ = !create_heap_objects;
3693
3694 if (FLAG_stress_sampling_allocation_profiler > 0) {
3695 uint64_t sample_interval = FLAG_stress_sampling_allocation_profiler;
3696 int stack_depth = 128;
3697 v8::HeapProfiler::SamplingFlags sampling_flags =
3698 v8::HeapProfiler::SamplingFlags::kSamplingForceGC;
3699 heap_profiler()->StartSamplingHeapProfiler(sample_interval, stack_depth,
3700 sampling_flags);
3701 }
3702
3703 #if defined(V8_OS_WIN64)
3704 if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
3705 const base::AddressRegion& code_range =
3706 heap()->memory_allocator()->code_range();
3707 void* start = reinterpret_cast<void*>(code_range.begin());
3708 size_t size_in_bytes = code_range.size();
3709 win64_unwindinfo::RegisterNonABICompliantCodeRange(start, size_in_bytes);
3710 }
3711 #endif // V8_OS_WIN64
3712
3713 if (create_heap_objects && FLAG_profile_deserialization) {
3714 double ms = timer.Elapsed().InMillisecondsF();
3715 PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
3716 }
3717
3718 return true;
3719 }
3720
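// Makes this isolate the current one for the calling thread and pushes the
// previously current isolate (if any) onto the entry stack; re-entry by the
// same thread only bumps the entry count. An illustrative embedder pattern
// (the public v8::Isolate::Scope wraps this Enter/Exit pair in RAII form):
//
//   isolate->Enter();
//   // ... execute JavaScript ...
//   isolate->Exit();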
3721 void Isolate::Enter() {
3722 Isolate* current_isolate = nullptr;
3723 PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
3724 if (current_data != nullptr) {
3725 current_isolate = current_data->isolate_;
3726 DCHECK_NOT_NULL(current_isolate);
3727 if (current_isolate == this) {
3728 DCHECK(Current() == this);
3729 DCHECK_NOT_NULL(entry_stack_);
3730 DCHECK(entry_stack_->previous_thread_data == nullptr ||
3731 entry_stack_->previous_thread_data->thread_id() ==
3732 ThreadId::Current());
3733 // Same thread re-enters the isolate, no need to re-init anything.
3734 entry_stack_->entry_count++;
3735 return;
3736 }
3737 }
3738
3739 PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
3740 DCHECK_NOT_NULL(data);
3741 DCHECK(data->isolate_ == this);
3742
3743 EntryStackItem* item =
3744 new EntryStackItem(current_data, current_isolate, entry_stack_);
3745 entry_stack_ = item;
3746
3747 SetIsolateThreadLocals(this, data);
3748
// Set the thread id in case this is the first time the thread enters the
// isolate.
3750 set_thread_id(data->thread_id());
3751 }
3752
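// Undoes the most recent Enter(): decrements the entry count and, once it
// reaches zero, pops the entry stack and restores the previously entered
// isolate as current.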
3753 void Isolate::Exit() {
3754 DCHECK_NOT_NULL(entry_stack_);
3755 DCHECK(entry_stack_->previous_thread_data == nullptr ||
3756 entry_stack_->previous_thread_data->thread_id() ==
3757 ThreadId::Current());
3758
3759 if (--entry_stack_->entry_count > 0) return;
3760
3761 DCHECK_NOT_NULL(CurrentPerIsolateThreadData());
3762 DCHECK(CurrentPerIsolateThreadData()->isolate_ == this);
3763
3764 // Pop the stack.
3765 EntryStackItem* item = entry_stack_;
3766 entry_stack_ = item->previous_item;
3767
3768 PerIsolateThreadData* previous_thread_data = item->previous_thread_data;
3769 Isolate* previous_isolate = item->previous_isolate;
3770
3771 delete item;
3772
// Restore the thread locals of the isolate this thread was running before
// entering this one.
3774 SetIsolateThreadLocals(previous_isolate, previous_thread_data);
3775 }
3776
3777 std::unique_ptr<PersistentHandles> Isolate::NewPersistentHandles() {
3778 return std::make_unique<PersistentHandles>(this);
3779 }
3780
3781 void Isolate::DumpAndResetStats() {
3782 if (FLAG_trace_turbo_stack_accesses) {
3783 StdoutStream os;
3784 uint64_t total_loads = 0;
3785 uint64_t total_stores = 0;
3786 os << "=== Stack access counters === " << std::endl;
3787 if (!stack_access_count_map) {
3788 os << "No stack accesses in optimized/wasm functions found.";
3789 } else {
3790 DCHECK_NOT_NULL(stack_access_count_map);
3791 os << "Number of optimized/wasm stack-access functions: "
3792 << stack_access_count_map->size() << std::endl;
3793 for (auto it = stack_access_count_map->cbegin();
3794 it != stack_access_count_map->cend(); it++) {
3795 std::string function_name((*it).first);
3796 std::pair<uint64_t, uint64_t> per_func_count = (*it).second;
3797 os << "Name: " << function_name << ", Loads: " << per_func_count.first
3798 << ", Stores: " << per_func_count.second << std::endl;
3799 total_loads += per_func_count.first;
3800 total_stores += per_func_count.second;
3801 }
3802 os << "Total Loads: " << total_loads << ", Total Stores: " << total_stores
3803 << std::endl;
3804 stack_access_count_map = nullptr;
3805 }
3806 }
3807 if (turbo_statistics() != nullptr) {
3808 DCHECK(FLAG_turbo_stats || FLAG_turbo_stats_nvp);
3809 StdoutStream os;
3810 if (FLAG_turbo_stats) {
3811 AsPrintableStatistics ps = {*turbo_statistics(), false};
3812 os << ps << std::endl;
3813 }
3814 if (FLAG_turbo_stats_nvp) {
3815 AsPrintableStatistics ps = {*turbo_statistics(), true};
3816 os << ps << std::endl;
3817 }
3818 delete turbo_statistics_;
3819 turbo_statistics_ = nullptr;
3820 }
// TODO(7424): There is no public API for the {WasmEngine} yet. So for now we
// just dump and reset the engine's statistics together with the Isolate.
3823 if (FLAG_turbo_stats_wasm) {
3824 wasm_engine()->DumpAndResetTurboStatistics();
3825 }
3826 if (V8_UNLIKELY(TracingFlags::runtime_stats.load(std::memory_order_relaxed) ==
3827 v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
3828 counters()->worker_thread_runtime_call_stats()->AddToMainTable(
3829 counters()->runtime_call_stats());
3830 counters()->runtime_call_stats()->Print();
3831 counters()->runtime_call_stats()->Reset();
3832 }
3833 if (BasicBlockProfiler::Get()->HasData(this)) {
3834 StdoutStream out;
3835 BasicBlockProfiler::Get()->Print(out, this);
3836 BasicBlockProfiler::Get()->ResetCounts(this);
3837 }
3838 }
3839
3840 void Isolate::AbortConcurrentOptimization(BlockingBehavior behavior) {
3841 if (concurrent_recompilation_enabled()) {
3842 DisallowHeapAllocation no_recursive_gc;
3843 optimizing_compile_dispatcher()->Flush(behavior);
3844 }
3845 }
3846
3847 CompilationStatistics* Isolate::GetTurboStatistics() {
3848 if (turbo_statistics() == nullptr)
3849 set_turbo_statistics(new CompilationStatistics());
3850 return turbo_statistics();
3851 }
3852
3853 CodeTracer* Isolate::GetCodeTracer() {
3854 if (code_tracer() == nullptr) set_code_tracer(new CodeTracer(id()));
3855 return code_tracer();
3856 }
3857
3858 bool Isolate::use_optimizer() {
3859 return FLAG_opt && !serializer_enabled_ && CpuFeatures::SupportsOptimizer() &&
3860 !is_precise_count_code_coverage();
3861 }
3862
3863 void Isolate::IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code) {
3864 DCHECK(code->IsCode() || code->IsByteArray());
3865 total_regexp_code_generated_ += code->Size();
3866 }
3867
3868 bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
3869 return NeedsSourcePositionsForProfiling() ||
3870 detailed_source_positions_for_profiling();
3871 }
3872
3873 bool Isolate::NeedsSourcePositionsForProfiling() const {
3874 return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
3875 FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
3876 debug_->is_active() || logger_->is_logging() || FLAG_trace_maps;
3877 }
3878
3879 void Isolate::SetFeedbackVectorsForProfilingTools(Object value) {
3880 DCHECK(value.IsUndefined(this) || value.IsArrayList());
3881 heap()->set_feedback_vectors_for_profiling_tools(value);
3882 }
3883
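// Ensures that heap()->feedback_vectors_for_profiling_tools() holds an
// ArrayList of the feedback vectors of all debuggable functions, collecting
// them from the heap on first use.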
3884 void Isolate::MaybeInitializeVectorListFromHeap() {
3885 if (!heap()->feedback_vectors_for_profiling_tools().IsUndefined(this)) {
3886 // Already initialized, return early.
3887 DCHECK(heap()->feedback_vectors_for_profiling_tools().IsArrayList());
3888 return;
3889 }
3890
3891 // Collect existing feedback vectors.
3892 std::vector<Handle<FeedbackVector>> vectors;
3893
3894 {
3895 HeapObjectIterator heap_iterator(heap());
3896 for (HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null();
3897 current_obj = heap_iterator.Next()) {
3898 if (!current_obj.IsFeedbackVector()) continue;
3899
3900 FeedbackVector vector = FeedbackVector::cast(current_obj);
3901 SharedFunctionInfo shared = vector.shared_function_info();
3902
3903 // No need to preserve the feedback vector for non-user-visible functions.
3904 if (!shared.IsSubjectToDebugging()) continue;
3905
3906 vectors.emplace_back(vector, this);
3907 }
3908 }
3909
3910 // Add collected feedback vectors to the root list lest we lose them to GC.
3911 Handle<ArrayList> list =
3912 ArrayList::New(this, static_cast<int>(vectors.size()));
3913 for (const auto& vector : vectors) list = ArrayList::Add(this, list, vector);
3914 SetFeedbackVectorsForProfilingTools(*list);
3915 }
3916
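// Installs {date_cache} as the isolate's DateCache, deleting the previous
// cache unless the same object is passed again.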
3917 void Isolate::set_date_cache(DateCache* date_cache) {
3918 if (date_cache != date_cache_) {
3919 delete date_cache_;
3920 }
3921 date_cache_ = date_cache;
3922 }
3923
3924 Isolate::KnownPrototype Isolate::IsArrayOrObjectOrStringPrototype(
3925 Object object) {
3926 Object context = heap()->native_contexts_list();
3927 while (!context.IsUndefined(this)) {
3928 Context current_context = Context::cast(context);
3929 if (current_context.initial_object_prototype() == object) {
3930 return KnownPrototype::kObject;
3931 } else if (current_context.initial_array_prototype() == object) {
3932 return KnownPrototype::kArray;
3933 } else if (current_context.initial_string_prototype() == object) {
3934 return KnownPrototype::kString;
3935 }
3936 context = current_context.next_context_link();
3937 }
3938 return KnownPrototype::kNone;
3939 }
3940
3941 bool Isolate::IsInAnyContext(Object object, uint32_t index) {
3942 DisallowHeapAllocation no_gc;
3943 Object context = heap()->native_contexts_list();
3944 while (!context.IsUndefined(this)) {
3945 Context current_context = Context::cast(context);
3946 if (current_context.get(index) == object) {
3947 return true;
3948 }
3949 context = current_context.next_context_link();
3950 }
3951 return false;
3952 }
3953
3954 void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
3955 DisallowHeapAllocation no_gc;
3956 if (!object->map().is_prototype_map()) return;
3957 if (!Protectors::IsNoElementsIntact(this)) return;
3958 KnownPrototype obj_type = IsArrayOrObjectOrStringPrototype(*object);
3959 if (obj_type == KnownPrototype::kNone) return;
3960 if (obj_type == KnownPrototype::kObject) {
3961 this->CountUsage(v8::Isolate::kObjectPrototypeHasElements);
3962 } else if (obj_type == KnownPrototype::kArray) {
3963 this->CountUsage(v8::Isolate::kArrayPrototypeHasElements);
3964 }
3965 Protectors::InvalidateNoElements(this);
3966 }
3967
3968 bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
3969 DisallowHeapAllocation no_gc;
3970 return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
3971 }
3972
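// Lazily creates *rng on first use, seeding it with {seed} when non-zero and
// with a default seed otherwise.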
3973 static base::RandomNumberGenerator* ensure_rng_exists(
3974 base::RandomNumberGenerator** rng, int seed) {
3975 if (*rng == nullptr) {
3976 if (seed != 0) {
3977 *rng = new base::RandomNumberGenerator(seed);
3978 } else {
3979 *rng = new base::RandomNumberGenerator();
3980 }
3981 }
3982 return *rng;
3983 }
3984
3985 base::RandomNumberGenerator* Isolate::random_number_generator() {
// TODO(bmeurer): Initialized lazily because it depends on flags; can be
// fixed once the default isolate cleanup is done.
3988 return ensure_rng_exists(&random_number_generator_, FLAG_random_seed);
3989 }
3990
3991 base::RandomNumberGenerator* Isolate::fuzzer_rng() {
3992 if (fuzzer_rng_ == nullptr) {
3993 int64_t seed = FLAG_fuzzer_random_seed;
3994 if (seed == 0) {
3995 seed = random_number_generator()->initial_seed();
3996 }
3997
3998 fuzzer_rng_ = new base::RandomNumberGenerator(seed);
3999 }
4000
4001 return fuzzer_rng_;
4002 }
4003
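// Draws random bits masked by {mask} until a non-zero hash is found, giving
// up after 30 attempts and returning 1 so that identity hashes are never 0.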
4004 int Isolate::GenerateIdentityHash(uint32_t mask) {
4005 int hash;
4006 int attempts = 0;
4007 do {
4008 hash = random_number_generator()->NextInt() & mask;
4009 } while (hash == 0 && attempts++ < 30);
4010 return hash != 0 ? hash : 1;
4011 }
4012
4013 Code Isolate::FindCodeObject(Address a) {
4014 return heap()->GcSafeFindCodeForInnerPointer(a);
4015 }
4016
4017 #ifdef DEBUG
4018 #define ISOLATE_FIELD_OFFSET(type, name, ignored) \
4019 const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
4020 ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
4021 ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
4022 #undef ISOLATE_FIELD_OFFSET
4023 #endif
4024
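// Returns the symbol registered under {name} in the registry selected by
// {dictionary_index}, creating and registering a new (possibly private)
// symbol on a miss.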
4025 Handle<Symbol> Isolate::SymbolFor(RootIndex dictionary_index,
4026 Handle<String> name, bool private_symbol) {
4027 Handle<String> key = factory()->InternalizeString(name);
4028 Handle<NameDictionary> dictionary =
4029 Handle<NameDictionary>::cast(root_handle(dictionary_index));
4030 InternalIndex entry = dictionary->FindEntry(this, key);
4031 Handle<Symbol> symbol;
4032 if (entry.is_not_found()) {
4033 symbol =
4034 private_symbol ? factory()->NewPrivateSymbol() : factory()->NewSymbol();
4035 symbol->set_description(*key);
4036 dictionary = NameDictionary::Add(this, dictionary, key, symbol,
4037 PropertyDetails::Empty(), &entry);
4038 switch (dictionary_index) {
4039 case RootIndex::kPublicSymbolTable:
4040 symbol->set_is_in_public_symbol_table(true);
4041 heap()->set_public_symbol_table(*dictionary);
4042 break;
4043 case RootIndex::kApiSymbolTable:
4044 heap()->set_api_symbol_table(*dictionary);
4045 break;
4046 case RootIndex::kApiPrivateSymbolTable:
4047 heap()->set_api_private_symbol_table(*dictionary);
4048 break;
4049 default:
4050 UNREACHABLE();
4051 }
4052 } else {
4053 symbol = Handle<Symbol>(Symbol::cast(dictionary->ValueAt(entry)), this);
4054 }
4055 return symbol;
4056 }
4057
4058 void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
4059 auto pos = std::find(before_call_entered_callbacks_.begin(),
4060 before_call_entered_callbacks_.end(), callback);
4061 if (pos != before_call_entered_callbacks_.end()) return;
4062 before_call_entered_callbacks_.push_back(callback);
4063 }
4064
4065 void Isolate::RemoveBeforeCallEnteredCallback(
4066 BeforeCallEnteredCallback callback) {
4067 auto pos = std::find(before_call_entered_callbacks_.begin(),
4068 before_call_entered_callbacks_.end(), callback);
4069 if (pos == before_call_entered_callbacks_.end()) return;
4070 before_call_entered_callbacks_.erase(pos);
4071 }
4072
4073 void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
4074 auto pos = std::find(call_completed_callbacks_.begin(),
4075 call_completed_callbacks_.end(), callback);
4076 if (pos != call_completed_callbacks_.end()) return;
4077 call_completed_callbacks_.push_back(callback);
4078 }
4079
4080 void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
4081 auto pos = std::find(call_completed_callbacks_.begin(),
4082 call_completed_callbacks_.end(), callback);
4083 if (pos == call_completed_callbacks_.end()) return;
4084 call_completed_callbacks_.erase(pos);
4085 }
4086
4087 void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
4088 if (!thread_local_top()->CallDepthIsZero()) return;
4089
4090 bool perform_checkpoint =
4091 microtask_queue &&
4092 microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kAuto;
4093
4094 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
4095 if (perform_checkpoint) microtask_queue->PerformCheckpoint(isolate);
4096
4097 if (call_completed_callbacks_.empty()) return;
4098 // Fire callbacks. Increase call depth to prevent recursive callbacks.
4099 v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
4100 std::vector<CallCompletedCallback> callbacks(call_completed_callbacks_);
4101 for (auto& callback : callbacks) {
4102 callback(reinterpret_cast<v8::Isolate*>(this));
4103 }
4104 }
4105
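// Recomputes the cached promise-hook state and invalidates the promise hook
// protector the first time a promise hook, the debugger, or an async event
// delegate becomes active.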
4106 void Isolate::PromiseHookStateUpdated() {
4107 bool promise_hook_or_async_event_delegate =
4108 promise_hook_ || async_event_delegate_;
4109 bool promise_hook_or_debug_is_active_or_async_event_delegate =
4110 promise_hook_or_async_event_delegate || debug()->is_active();
4111 if (promise_hook_or_debug_is_active_or_async_event_delegate &&
4112 Protectors::IsPromiseHookIntact(this)) {
4113 HandleScope scope(this);
4114 Protectors::InvalidatePromiseHook(this);
4115 }
4116 promise_hook_or_async_event_delegate_ = promise_hook_or_async_event_delegate;
4117 promise_hook_or_debug_is_active_or_async_event_delegate_ =
4118 promise_hook_or_debug_is_active_or_async_event_delegate;
4119 }
4120
4121 namespace {
4122
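// Creates a JSPromise through the API and immediately rejects it with
// {exception}; used below to report dynamic-import failures to the embedder.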
4123 MaybeHandle<JSPromise> NewRejectedPromise(Isolate* isolate,
4124 v8::Local<v8::Context> api_context,
4125 Handle<Object> exception) {
4126 v8::Local<v8::Promise::Resolver> resolver;
4127 ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
4128 isolate, resolver, v8::Promise::Resolver::New(api_context),
4129 MaybeHandle<JSPromise>());
4130
4131 RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
4132 isolate, resolver->Reject(api_context, v8::Utils::ToLocal(exception)),
4133 MaybeHandle<JSPromise>());
4134
4135 v8::Local<v8::Promise> promise = resolver->GetPromise();
4136 return v8::Utils::OpenHandle(*promise);
4137 }
4138
4139 } // namespace
4140
4141 MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
4142 Handle<Script> referrer, Handle<Object> specifier) {
4143 v8::Local<v8::Context> api_context =
4144 v8::Utils::ToLocal(Handle<Context>(native_context()));
4145
4146 if (host_import_module_dynamically_callback_ == nullptr) {
4147 Handle<Object> exception =
4148 factory()->NewError(error_function(), MessageTemplate::kUnsupported);
4149 return NewRejectedPromise(this, api_context, exception);
4150 }
4151
4152 Handle<String> specifier_str;
4153 MaybeHandle<String> maybe_specifier = Object::ToString(this, specifier);
4154 if (!maybe_specifier.ToHandle(&specifier_str)) {
4155 Handle<Object> exception(pending_exception(), this);
4156 clear_pending_exception();
4157
4158 return NewRejectedPromise(this, api_context, exception);
4159 }
4160 DCHECK(!has_pending_exception());
4161
4162 v8::Local<v8::Promise> promise;
4163 ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
4164 this, promise,
4165 host_import_module_dynamically_callback_(
4166 api_context, v8::Utils::ScriptOrModuleToLocal(referrer),
4167 v8::Utils::ToLocal(specifier_str)),
4168 MaybeHandle<JSPromise>());
4169 return v8::Utils::OpenHandle(*promise);
4170 }
4171
4172 void Isolate::ClearKeptObjects() { heap()->ClearKeptObjects(); }
4173
4174 void Isolate::SetHostImportModuleDynamicallyCallback(
4175 HostImportModuleDynamicallyCallback callback) {
4176 host_import_module_dynamically_callback_ = callback;
4177 }
4178
4179 MaybeHandle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
4180 Handle<SourceTextModule> module) {
4181 CHECK(module->import_meta().IsTheHole(this));
4182 Handle<JSObject> import_meta = factory()->NewJSObjectWithNullProto();
4183 if (host_initialize_import_meta_object_callback_ != nullptr) {
4184 v8::Local<v8::Context> api_context =
4185 v8::Utils::ToLocal(Handle<Context>(native_context()));
4186 host_initialize_import_meta_object_callback_(
4187 api_context, Utils::ToLocal(Handle<Module>::cast(module)),
4188 v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(import_meta)));
4189 if (has_scheduled_exception()) {
4190 PromoteScheduledException();
4191 return {};
4192 }
4193 }
4194 return import_meta;
4195 }
4196
4197 void Isolate::SetHostInitializeImportMetaObjectCallback(
4198 HostInitializeImportMetaObjectCallback callback) {
4199 host_initialize_import_meta_object_callback_ = callback;
4200 }
4201
4202 MaybeHandle<Object> Isolate::RunPrepareStackTraceCallback(
4203 Handle<Context> context, Handle<JSObject> error, Handle<JSArray> sites) {
4204 v8::Local<v8::Context> api_context = Utils::ToLocal(context);
4205
4206 v8::Local<v8::Value> stack;
4207 ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
4208 this, stack,
4209 prepare_stack_trace_callback_(api_context, Utils::ToLocal(error),
4210 Utils::ToLocal(sites)),
4211 MaybeHandle<Object>());
4212 return Utils::OpenHandle(*stack);
4213 }
4214
4215 int Isolate::LookupOrAddExternallyCompiledFilename(const char* filename) {
4216 if (embedded_file_writer_ != nullptr) {
4217 return embedded_file_writer_->LookupOrAddExternallyCompiledFilename(
4218 filename);
4219 }
4220 return 0;
4221 }
4222
4223 const char* Isolate::GetExternallyCompiledFilename(int index) const {
4224 if (embedded_file_writer_ != nullptr) {
4225 return embedded_file_writer_->GetExternallyCompiledFilename(index);
4226 }
4227 return "";
4228 }
4229
4230 int Isolate::GetExternallyCompiledFilenameCount() const {
4231 if (embedded_file_writer_ != nullptr) {
4232 return embedded_file_writer_->GetExternallyCompiledFilenameCount();
4233 }
4234 return 0;
4235 }
4236
4237 void Isolate::PrepareBuiltinSourcePositionMap() {
4238 if (embedded_file_writer_ != nullptr) {
4239 return embedded_file_writer_->PrepareBuiltinSourcePositionMap(
4240 this->builtins());
4241 }
4242 }
4243
4244 void Isolate::PrepareBuiltinLabelInfoMap() {
4245 if (embedded_file_writer_ != nullptr) {
4246 embedded_file_writer_->PrepareBuiltinLabelInfoMap(
4247 heap()->construct_stub_create_deopt_pc_offset().value(),
4248 heap()->construct_stub_invoke_deopt_pc_offset().value(),
4249 heap()->arguments_adaptor_deopt_pc_offset().value());
4250 }
4251 }
4252
4253 #if defined(V8_OS_WIN64)
4254 void Isolate::SetBuiltinUnwindData(
4255 int builtin_index,
4256 const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) {
4257 if (embedded_file_writer_ != nullptr) {
4258 embedded_file_writer_->SetBuiltinUnwindData(builtin_index, unwinding_info);
4259 }
4260 }
4261 #endif // V8_OS_WIN64
4262
4263 void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
4264 prepare_stack_trace_callback_ = callback;
4265 }
4266
4267 bool Isolate::HasPrepareStackTraceCallback() const {
4268 return prepare_stack_trace_callback_ != nullptr;
4269 }
4270
4271 void Isolate::SetAddCrashKeyCallback(AddCrashKeyCallback callback) {
4272 add_crash_key_callback_ = callback;
4273
4274 // Log the initial set of data.
4275 AddCrashKeysForIsolateAndHeapPointers();
4276 }
4277
4278 void Isolate::SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
4279 void* data) {
4280 atomics_wait_callback_ = callback;
4281 atomics_wait_callback_data_ = data;
4282 }
4283
4284 void Isolate::RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
4285 Handle<JSArrayBuffer> array_buffer,
4286 size_t offset_in_bytes, int64_t value,
4287 double timeout_in_ms,
4288 AtomicsWaitWakeHandle* stop_handle) {
4289 DCHECK(array_buffer->is_shared());
4290 if (atomics_wait_callback_ == nullptr) return;
4291 HandleScope handle_scope(this);
4292 atomics_wait_callback_(
4293 event, v8::Utils::ToLocalShared(array_buffer), offset_in_bytes, value,
4294 timeout_in_ms,
4295 reinterpret_cast<v8::Isolate::AtomicsWaitWakeHandle*>(stop_handle),
4296 atomics_wait_callback_data_);
4297 }
4298
4299 void Isolate::SetPromiseHook(PromiseHook hook) {
4300 promise_hook_ = hook;
4301 PromiseHookStateUpdated();
4302 }
4303
4304 void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
4305 Handle<Object> parent) {
4306 RunPromiseHookForAsyncEventDelegate(type, promise);
4307 if (promise_hook_ == nullptr) return;
4308 promise_hook_(type, v8::Utils::PromiseToLocal(promise),
4309 v8::Utils::ToLocal(parent));
4310 }
4311
4312 void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
4313 Handle<JSPromise> promise) {
4314 if (!async_event_delegate_) return;
4315 switch (type) {
4316 case PromiseHookType::kResolve:
4317 return;
4318 case PromiseHookType::kBefore:
4319 if (!promise->async_task_id()) return;
4320 async_event_delegate_->AsyncEventOccurred(
4321 debug::kDebugWillHandle, promise->async_task_id(), false);
4322 break;
4323 case PromiseHookType::kAfter:
4324 if (!promise->async_task_id()) return;
4325 async_event_delegate_->AsyncEventOccurred(
4326 debug::kDebugDidHandle, promise->async_task_id(), false);
4327 break;
4328 case PromiseHookType::kInit:
4329 debug::DebugAsyncActionType type = debug::kDebugPromiseThen;
4330 bool last_frame_was_promise_builtin = false;
4331 JavaScriptFrameIterator it(this);
4332 while (!it.done()) {
4333 std::vector<Handle<SharedFunctionInfo>> infos;
4334 it.frame()->GetFunctions(&infos);
4335 for (size_t i = 1; i <= infos.size(); ++i) {
4336 Handle<SharedFunctionInfo> info = infos[infos.size() - i];
4337 if (info->IsUserJavaScript()) {
// We should not report PromiseThen and PromiseCatch, which are called
// indirectly, e.g. Promise.all calls Promise.then internally.
4340 if (last_frame_was_promise_builtin) {
4341 if (!promise->async_task_id()) {
4342 promise->set_async_task_id(++async_task_count_);
4343 }
4344 async_event_delegate_->AsyncEventOccurred(
4345 type, promise->async_task_id(), debug()->IsBlackboxed(info));
4346 }
4347 return;
4348 }
4349 last_frame_was_promise_builtin = false;
4350 if (info->HasBuiltinId()) {
4351 if (info->builtin_id() == Builtins::kPromisePrototypeThen) {
4352 type = debug::kDebugPromiseThen;
4353 last_frame_was_promise_builtin = true;
4354 } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) {
4355 type = debug::kDebugPromiseCatch;
4356 last_frame_was_promise_builtin = true;
4357 } else if (info->builtin_id() ==
4358 Builtins::kPromisePrototypeFinally) {
4359 type = debug::kDebugPromiseFinally;
4360 last_frame_was_promise_builtin = true;
4361 }
4362 }
4363 }
4364 it.Advance();
4365 }
4366 }
4367 }
4368
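// Reports async function state transitions to the debugger, lazily
// assigning an async task id to the promise on first use.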
4369 void Isolate::OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
4370 debug::DebugAsyncActionType event) {
4371 if (!async_event_delegate_) return;
4372 if (!promise->async_task_id()) {
4373 promise->set_async_task_id(++async_task_count_);
4374 }
4375 async_event_delegate_->AsyncEventOccurred(event, promise->async_task_id(),
4376 false);
4377 }
4378
4379 void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
4380 promise_reject_callback_ = callback;
4381 }
4382
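// Wraps the promise, the rejection value, and the event kind into an
// API-level PromiseRejectMessage and hands it to the embedder's callback,
// if one is installed.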
4383 void Isolate::ReportPromiseReject(Handle<JSPromise> promise,
4384 Handle<Object> value,
4385 v8::PromiseRejectEvent event) {
4386 if (promise_reject_callback_ == nullptr) return;
4387 promise_reject_callback_(v8::PromiseRejectMessage(
4388 v8::Utils::PromiseToLocal(promise), event, v8::Utils::ToLocal(value)));
4389 }
4390
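// Registers the embedder's use-counter callback; it may be set at most
// once.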
4391 void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
4392 DCHECK(!use_counter_callback_);
4393 use_counter_callback_ = callback;
4394 }
4395
4396 void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
4397 // The counter callback
4398 // - may cause the embedder to call into V8, which is not generally possible
4399 // during GC.
4400 // - requires a current native context, which may not always exist.
4401 // TODO(jgruber): Consider either removing the native context requirement in
4402 // blink, or passing it to the callback explicitly.
4403 if (heap_.gc_state() == Heap::NOT_IN_GC && !context().is_null()) {
4404 DCHECK(context().IsContext());
4405 DCHECK(context().native_context().IsNativeContext());
4406 if (use_counter_callback_) {
4407 HandleScope handle_scope(this);
4408 use_counter_callback_(reinterpret_cast<v8::Isolate*>(this), feature);
4409 }
4410 } else {
4411 heap_.IncrementDeferredCount(feature);
4412 }
4413 }
4414
4415 int Isolate::GetNextScriptId() { return heap()->NextScriptId(); }
4416
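// Computes the file name for --trace-turbo-cfg output. Unless overridden
// with --trace-turbo-cfg-file, the name encodes the process id and the
// isolate id ("any" when no isolate is given), so concurrent processes
// and isolates do not clobber each other's output.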
4417 // static
4418 std::string Isolate::GetTurboCfgFileName(Isolate* isolate) {
4419 if (FLAG_trace_turbo_cfg_file == nullptr) {
4420 std::ostringstream os;
4421 os << "turbo-" << base::OS::GetCurrentProcessId() << "-";
4422 if (isolate != nullptr) {
4423 os << isolate->id();
4424 } else {
4425 os << "any";
4426 }
4427 os << ".cfg";
4428 return os.str();
4429 } else {
4430 return FLAG_trace_turbo_cfg_file;
4431 }
4432 }
4433
// Heap::detached_contexts tracks detached contexts as pairs
// (the number of GCs since the context was detached, the context).
4436 void Isolate::AddDetachedContext(Handle<Context> context) {
4437 HandleScope scope(this);
4438 Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
4439 detached_contexts = WeakArrayList::AddToEnd(
4440 this, detached_contexts, MaybeObjectHandle(Smi::zero(), this),
4441 MaybeObjectHandle::Weak(context));
4442 heap()->set_detached_contexts(*detached_contexts);
4443 }
4444
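// Keeps a weak list of the shared WebAssembly memories created in this
// isolate; slots are cleared automatically once a memory object dies.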
4445 void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) {
4446 HandleScope scope(this);
4447 Handle<WeakArrayList> shared_wasm_memories =
4448 factory()->shared_wasm_memories();
4449 shared_wasm_memories = WeakArrayList::AddToEnd(
4450 this, shared_wasm_memories, MaybeObjectHandle::Weak(memory_object));
4451 heap()->set_shared_wasm_memories(*shared_wasm_memories);
4452 }
4453
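// Compacts the detached-contexts list after a GC: pairs whose context has
// been collected are dropped, the survivor counts of the remaining pairs
// are incremented, and the now-unused tail of the list is cleared.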
4454 void Isolate::CheckDetachedContextsAfterGC() {
4455 HandleScope scope(this);
4456 Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
4457 int length = detached_contexts->length();
4458 if (length == 0) return;
4459 int new_length = 0;
4460 for (int i = 0; i < length; i += 2) {
4461 int mark_sweeps = detached_contexts->Get(i).ToSmi().value();
4462 MaybeObject context = detached_contexts->Get(i + 1);
4463 DCHECK(context->IsWeakOrCleared());
4464 if (!context->IsCleared()) {
4465 detached_contexts->Set(
4466 new_length, MaybeObject::FromSmi(Smi::FromInt(mark_sweeps + 1)));
4467 detached_contexts->Set(new_length + 1, context);
4468 new_length += 2;
4469 }
4470 }
4471 detached_contexts->set_length(new_length);
4472 while (new_length < length) {
4473 detached_contexts->Set(new_length, MaybeObject::FromSmi(Smi::zero()));
4474 ++new_length;
4475 }
4476
4477 if (FLAG_trace_detached_contexts) {
PrintF("%d detached contexts were collected out of %d\n",
       length - new_length, length);
4480 for (int i = 0; i < new_length; i += 2) {
4481 int mark_sweeps = detached_contexts->Get(i).ToSmi().value();
4482 MaybeObject context = detached_contexts->Get(i + 1);
4483 DCHECK(context->IsWeakOrCleared());
4484 if (mark_sweeps > 3) {
PrintF("detached context %p survived %d GCs (leak?)\n",
       reinterpret_cast<void*>(context.ptr()), mark_sweeps);
4487 }
4488 }
4489 }
4490 }
4491
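// Returns the timestamp recorded when the isolate last entered the
// PERFORMANCE_LOAD RAIL mode; the mutex pairs with the writer in
// SetRAILMode.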
4492 double Isolate::LoadStartTimeMs() {
4493 base::MutexGuard guard(&rail_mutex_);
4494 return load_start_time_ms_;
4495 }
4496
4497 void Isolate::SetRAILMode(RAILMode rail_mode) {
4498 RAILMode old_rail_mode = rail_mode_.load();
4499 if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
4500 base::MutexGuard guard(&rail_mutex_);
4501 load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
4502 }
4503 rail_mode_.store(rail_mode);
4504 if (old_rail_mode == PERFORMANCE_LOAD && rail_mode != PERFORMANCE_LOAD) {
4505 heap()->incremental_marking()->incremental_marking_job()->ScheduleTask(
4506 heap());
4507 }
4508 if (FLAG_trace_rail) {
4509 PrintIsolate(this, "RAIL mode: %s\n", RAILModeName(rail_mode));
4510 }
4511 }
4512
4513 void Isolate::IsolateInBackgroundNotification() {
4514 is_isolate_in_background_ = true;
4515 heap()->ActivateMemoryReducerIfNeeded();
4516 }
4517
4518 void Isolate::IsolateInForegroundNotification() {
4519 is_isolate_in_background_ = false;
4520 }
4521
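// Prints a message prefixed with the process id, the isolate address, and
// the time in ms since isolate initialization.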
4522 void Isolate::PrintWithTimestamp(const char* format, ...) {
4523 base::OS::Print("[%d:%p] %8.0f ms: ", base::OS::GetCurrentProcessId(),
4524 static_cast<void*>(this), time_millis_since_init());
4525 va_list arguments;
4526 va_start(arguments, format);
4527 base::OS::VPrint(format, arguments);
4528 va_end(arguments);
4529 }
4530
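// Toggles the recorded VM state between EXTERNAL and IDLE. This only
// takes effect while no JavaScript frame is on the stack.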
4531 void Isolate::SetIdle(bool is_idle) {
4532 StateTag state = current_vm_state();
4533 if (js_entry_sp() != kNullAddress) return;
4534 DCHECK(state == EXTERNAL || state == IDLE);
4535 if (is_idle) {
4536 set_current_vm_state(IDLE);
4537 } else if (state == IDLE) {
4538 set_current_vm_state(EXTERNAL);
4539 }
4540 }
4541
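// Eagerly ensures source positions for every bytecode array on the heap.
// The heap iteration itself must not allocate, so the SharedFunctionInfos
// are first collected into handles and only processed afterwards
// (computing source positions can allocate).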
4542 void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
4543 HandleScope scope(this);
4544 std::vector<Handle<SharedFunctionInfo>> sfis;
4545 {
4546 DisallowHeapAllocation no_gc;
4547 HeapObjectIterator iterator(heap());
4548 for (HeapObject obj = iterator.Next(); !obj.is_null();
4549 obj = iterator.Next()) {
4550 if (obj.IsSharedFunctionInfo()) {
4551 SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
4552 if (sfi.HasBytecodeArray()) {
4553 sfis.push_back(Handle<SharedFunctionInfo>(sfi, this));
4554 }
4555 }
4556 }
4557 }
4558 for (auto sfi : sfis) {
4559 SharedFunctionInfo::EnsureSourcePositionsAvailable(this, sfi);
4560 }
4561 }
4562
4563 #ifdef V8_INTL_SUPPORT
4564 icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type) {
4565 return icu_object_cache_[cache_type].get();
4566 }
4567
4568 void Isolate::set_icu_object_in_cache(ICUObjectCacheType cache_type,
4569 std::shared_ptr<icu::UMemory> obj) {
4570 icu_object_cache_[cache_type] = obj;
4571 }
4572
4573 void Isolate::clear_cached_icu_object(ICUObjectCacheType cache_type) {
4574 icu_object_cache_.erase(cache_type);
4575 }
4576
4577 void Isolate::ClearCachedIcuObjects() { icu_object_cache_.clear(); }
4578
4579 #endif // V8_INTL_SUPPORT
4580
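// Returns true if the JS stack (on simulators: the simulated JS stack)
// has grown to within |gap| bytes of its limit.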
4581 bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
4582 StackGuard* stack_guard = isolate_->stack_guard();
4583 #ifdef USE_SIMULATOR
4584 // The simulator uses a separate JS stack.
4585 Address jssp_address = Simulator::current(isolate_)->get_sp();
4586 uintptr_t jssp = static_cast<uintptr_t>(jssp_address);
4587 if (jssp - gap < stack_guard->real_jslimit()) return true;
4588 #endif // USE_SIMULATOR
4589 return GetCurrentStackPosition() - gap < stack_guard->real_climit();
4590 }
4591
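// Remembers the current context so that the destructor can restore it,
// and records the C entry frame pointer for later IsBelowFrame() queries.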
4592 SaveContext::SaveContext(Isolate* isolate) : isolate_(isolate) {
4593 if (!isolate->context().is_null()) {
4594 context_ = Handle<Context>(isolate->context(), isolate);
4595 }
4596
4597 c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
4598 }
4599
4600 SaveContext::~SaveContext() {
4601 isolate_->set_context(context_.is_null() ? Context() : *context_);
4602 }
4603
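// Returns whether the context was saved below |frame| on the stack, i.e.
// whether the recorded C entry frame is older than |frame| (stacks grow
// towards lower addresses).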
4604 bool SaveContext::IsBelowFrame(CommonFrame* frame) {
4605 return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
4606 }
4607
4608 SaveAndSwitchContext::SaveAndSwitchContext(Isolate* isolate,
4609 Context new_context)
4610 : SaveContext(isolate) {
4611 isolate->set_context(new_context);
4612 }
4613
4614 #ifdef DEBUG
4615 AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
4616 : isolate_(isolate), context_(isolate->context(), isolate) {}
4617
4618 namespace {
4619
bool Overlapping(const MemoryRange& a, const MemoryRange& b) {
  uintptr_t a1 = reinterpret_cast<uintptr_t>(a.start);
  uintptr_t a2 = a1 + a.length_in_bytes;
  uintptr_t b1 = reinterpret_cast<uintptr_t>(b.start);
  uintptr_t b2 = b1 + b.length_in_bytes;
  // The half-open ranges [a1, a2) and [b1, b2) overlap iff each starts
  // before the other ends. Unlike only checking whether b1 or b2 lies in
  // [a1, a2), this also covers the case where [a1, a2) is strictly
  // contained in [b1, b2).
  return a1 < b2 && b1 < a2;
}
4628
4629 } // anonymous namespace
4630
4631 #endif // DEBUG
4632
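// Inserts |range| into the sorted list of code pages in copy-on-write
// fashion: the currently inactive buffer is filled with the merged
// contents and then published atomically, so lock-free readers always
// observe a consistent vector.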
4633 void Isolate::AddCodeMemoryRange(MemoryRange range) {
4634 std::vector<MemoryRange>* old_code_pages = GetCodePages();
4635 DCHECK_NOT_NULL(old_code_pages);
4636 #ifdef DEBUG
4637 auto overlapping = [range](const MemoryRange& a) {
4638 return Overlapping(range, a);
4639 };
4640 DCHECK_EQ(old_code_pages->end(),
4641 std::find_if(old_code_pages->begin(), old_code_pages->end(),
4642 overlapping));
4643 #endif
4644
4645 std::vector<MemoryRange>* new_code_pages;
4646 if (old_code_pages == &code_pages_buffer1_) {
4647 new_code_pages = &code_pages_buffer2_;
4648 } else {
4649 new_code_pages = &code_pages_buffer1_;
4650 }
4651
// Copy all existing data from the old vector to the new vector and insert
// the new range, keeping the vector sorted by start address.
4654 new_code_pages->clear();
4655 new_code_pages->reserve(old_code_pages->size() + 1);
4656 std::merge(old_code_pages->begin(), old_code_pages->end(), &range, &range + 1,
4657 std::back_inserter(*new_code_pages),
4658 [](const MemoryRange& a, const MemoryRange& b) {
4659 return a.start < b.start;
4660 });
4661
// Atomically switch out the pointer so that concurrent readers observe
// either the old vector or the new one, never a partial update.
4663 SetCodePages(new_code_pages);
4664 }
4665
4666 // |chunk| is either a Page or an executable LargePage.
4667 void Isolate::AddCodeMemoryChunk(MemoryChunk* chunk) {
4668 // We only keep track of individual code pages/allocations if we are on arm32,
4669 // because on x64 and arm64 we have a code range which makes this unnecessary.
4670 #if !defined(V8_TARGET_ARCH_ARM)
4671 return;
4672 #else
4673 void* new_page_start = reinterpret_cast<void*>(chunk->area_start());
4674 size_t new_page_size = chunk->area_size();
4675
4676 MemoryRange new_range{new_page_start, new_page_size};
4677
4678 AddCodeMemoryRange(new_range);
4679 #endif // !defined(V8_TARGET_ARCH_ARM)
4680 }
4681
4682 void Isolate::AddCodeRange(Address begin, size_t length_in_bytes) {
4683 AddCodeMemoryRange(
4684 MemoryRange{reinterpret_cast<void*>(begin), length_in_bytes});
4685 }
4686
4687 bool Isolate::RequiresCodeRange() const {
4688 return kPlatformRequiresCodeRange && !jitless_;
4689 }
4690
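// Returns the metrics-recorder id for |context|, allocating a fresh id
// (plus a weak reference used to drop the mapping once the context dies)
// on first request. While the serializer is enabled, an empty id is
// returned instead.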
4691 v8::metrics::Recorder::ContextId Isolate::GetOrRegisterRecorderContextId(
4692 Handle<NativeContext> context) {
4693 if (serializer_enabled_) return v8::metrics::Recorder::ContextId::Empty();
4694 i::Object id = context->recorder_context_id();
4695 if (id.IsNullOrUndefined()) {
4696 CHECK_LT(last_recorder_context_id_, i::Smi::kMaxValue);
4697 context->set_recorder_context_id(
4698 i::Smi::FromIntptr(++last_recorder_context_id_));
4699 v8::HandleScope handle_scope(reinterpret_cast<v8::Isolate*>(this));
4700 auto result = recorder_context_id_map_.emplace(
4701 std::piecewise_construct,
4702 std::forward_as_tuple(last_recorder_context_id_),
4703 std::forward_as_tuple(reinterpret_cast<v8::Isolate*>(this),
4704 ToApiHandle<v8::Context>(context)));
4705 result.first->second.SetWeak(
4706 reinterpret_cast<void*>(last_recorder_context_id_),
4707 RemoveContextIdCallback, v8::WeakCallbackType::kParameter);
4708 return v8::metrics::Recorder::ContextId(last_recorder_context_id_);
4709 } else {
4710 DCHECK(id.IsSmi());
4711 return v8::metrics::Recorder::ContextId(
4712 static_cast<uintptr_t>(i::Smi::ToInt(id)));
4713 }
4714 }
4715
4716 MaybeLocal<v8::Context> Isolate::GetContextFromRecorderContextId(
4717 v8::metrics::Recorder::ContextId id) {
4718 auto result = recorder_context_id_map_.find(id.id_);
4719 if (result == recorder_context_id_map_.end() || result->second.IsEmpty())
4720 return MaybeLocal<v8::Context>();
4721 return result->second.Get(reinterpret_cast<v8::Isolate*>(this));
4722 }
4723
4724 void Isolate::RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data) {
4725 Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
4726 uintptr_t context_id = reinterpret_cast<uintptr_t>(data.GetParameter());
4727 isolate->recorder_context_id_map_.erase(context_id);
4728 }
4729
4730 // |chunk| is either a Page or an executable LargePage.
4731 void Isolate::RemoveCodeMemoryChunk(MemoryChunk* chunk) {
4732 // We only keep track of individual code pages/allocations if we are on arm32,
4733 // because on x64 and arm64 we have a code range which makes this unnecessary.
4734 #if !defined(V8_TARGET_ARCH_ARM)
4735 return;
4736 #else
4737 void* removed_page_start = reinterpret_cast<void*>(chunk->area_start());
4738 std::vector<MemoryRange>* old_code_pages = GetCodePages();
4739 DCHECK_NOT_NULL(old_code_pages);
4740
4741 std::vector<MemoryRange>* new_code_pages;
4742 if (old_code_pages == &code_pages_buffer1_) {
4743 new_code_pages = &code_pages_buffer2_;
4744 } else {
4745 new_code_pages = &code_pages_buffer1_;
4746 }
4747
4748 // Copy all existing data from the old vector to the new vector except the
4749 // removed page.
4750 new_code_pages->clear();
4751 new_code_pages->reserve(old_code_pages->size() - 1);
4752 std::remove_copy_if(old_code_pages->begin(), old_code_pages->end(),
4753 std::back_inserter(*new_code_pages),
4754 [removed_page_start](const MemoryRange& range) {
4755 return range.start == removed_page_start;
4756 });
4757 DCHECK_EQ(old_code_pages->size(), new_code_pages->size() + 1);
// Atomically switch out the pointer, as in AddCodeMemoryRange.
4759 SetCodePages(new_code_pages);
4760 #endif // !defined(V8_TARGET_ARCH_ARM)
4761 }
4762
4763 #undef TRACE_ISOLATE
4764
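// The two accessors below hand out stable addresses at which generated
// code can count per-function stack loads and stores. Entries are created
// on first use and never erased, and std::map never moves its values, so
// the returned addresses stay valid.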
4765 // static
4766 Address Isolate::load_from_stack_count_address(const char* function_name) {
4767 DCHECK_NOT_NULL(function_name);
4768 if (!stack_access_count_map) {
4769 stack_access_count_map = new MapOfLoadsAndStoresPerFunction{};
4770 }
4771 auto& map = *stack_access_count_map;
4772 std::string name(function_name);
// It is safe to return the address of a std::map value: insertions never
// invalidate existing references, and entries are never erased here.
4775 return reinterpret_cast<Address>(&map[name].first);
4776 }
4777
4778 // static
4779 Address Isolate::store_to_stack_count_address(const char* function_name) {
4780 DCHECK_NOT_NULL(function_name);
4781 if (!stack_access_count_map) {
4782 stack_access_count_map = new MapOfLoadsAndStoresPerFunction{};
4783 }
4784 auto& map = *stack_access_count_map;
4785 std::string name(function_name);
// It is safe to return the address of a std::map value: insertions never
// invalidate existing references, and entries are never erased here.
4788 return reinterpret_cast<Address>(&map[name].second);
4789 }
4790
4791 } // namespace internal
4792 } // namespace v8
4793