// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_EXECUTION_ISOLATE_H_
#define V8_EXECUTION_ISOLATE_H_

#include <atomic>
#include <cstddef>
#include <functional>
#include <memory>
#include <queue>
#include <unordered_map>
#include <vector>

#include "include/v8-context.h"
#include "include/v8-internal.h"
#include "include/v8-isolate.h"
#include "include/v8-metrics.h"
#include "include/v8-snapshot.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/builtins/builtins.h"
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
#include "src/execution/execution.h"
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
#include "src/execution/shared-mutex-guard-if-off-thread.h"
#include "src/execution/stack-guard.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/init/isolate-allocator.h"
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
#include "src/objects/js-objects.h"
#include "src/runtime/runtime.h"
#include "src/sandbox/external-pointer-table.h"
#include "src/sandbox/sandbox.h"
#include "src/strings/unicode.h"
#include "src/utils/allocation.h"

#ifdef V8_INTL_SUPPORT
#include "unicode/uversion.h"  // Define U_ICU_NAMESPACE.
namespace U_ICU_NAMESPACE {
class UMemory;
}  // namespace U_ICU_NAMESPACE
#endif  // V8_INTL_SUPPORT

#if USE_SIMULATOR
#include "src/execution/encoded-c-signature.h"
namespace v8 {
namespace internal {
class SimulatorData;
}
}  // namespace v8
#endif

namespace v8_inspector {
class V8Inspector;
}  // namespace v8_inspector

namespace v8 {

class EmbedderState;

namespace base {
class RandomNumberGenerator;
}  // namespace base

namespace bigint {
class Processor;
}

namespace debug {
class ConsoleDelegate;
class AsyncEventDelegate;
}  // namespace debug

namespace internal {

namespace heap {
class HeapTester;
}  // namespace heap

namespace maglev {
class MaglevConcurrentDispatcher;
}  // namespace maglev

class AddressToIndexHashMap;
class AstStringConstants;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
class CodeEventDispatcher;
class CodeTracer;
class CommonFrame;
class CompilationCache;
class CompilationStatistics;
class Counters;
class Debug;
class Deoptimizer;
class DescriptorLookupCache;
class EmbeddedFileWriterInterface;
class EternalHandles;
class GlobalHandles;
class GlobalSafepoint;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InnerPointerToCodeCache;
class LazyCompileDispatcher;
class LocalIsolate;
class Logger;
class MaterializedObjectStore;
class Microtask;
class MicrotaskQueue;
class OptimizingCompileDispatcher;
class PersistentHandles;
class PersistentHandlesList;
class ReadOnlyArtifacts;
class RegExpStack;
class RootVisitor;
class SetupIsolateDelegate;
class Simulator;
class SnapshotData;
class StringTable;
class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class TieringManager;
class TracingCpuProfilerImpl;
class UnicodeCache;
struct ManagedPtrDestructor;

template <StateTag Tag>
class VMState;

namespace baseline {
class BaselineBatchCompiler;
}  // namespace baseline

namespace interpreter {
class Interpreter;
}  // namespace interpreter

namespace compiler {
class NodeObserver;
class PerIsolateCompilerCache;
}  // namespace compiler

namespace win64_unwindinfo {
class BuiltinUnwindInfo;
}  // namespace win64_unwindinfo

namespace metrics {
class Recorder;
}  // namespace metrics

namespace wasm {
class StackMemory;
}

#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    DCHECK(!__isolate__->has_pending_exception());     \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)
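
// A minimal usage sketch (the runtime function name is hypothetical): the
// macro is placed at a point where no exception is pending, and bails out
// early, promoting the scheduled exception to a pending one, if something
// (e.g. a termination request) was scheduled while V8 was in external code.
//
//   RUNTIME_FUNCTION(Runtime_HypotheticalOp) {
//     HandleScope scope(isolate);
//     RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
//     // ... runs only when nothing is scheduled ...
//   }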

// Macros for MaybeHandle.

#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    DCHECK(!__isolate__->has_pending_exception());          \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())

#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    if (!(call).ToLocal(&dst)) {                                              \
      DCHECK(__isolate__->has_scheduled_exception());                         \
      __isolate__->PromoteScheduledException();                               \
      return value;                                                           \
    }                                                                         \
  } while (false)

#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                            \
    Isolate* __isolate__ = (isolate);                             \
    if ((call).IsNothing()) {                                     \
      DCHECK(__isolate__->has_scheduled_exception());             \
      __isolate__->PromoteScheduledException();                   \
      return value;                                               \
    }                                                             \
  } while (false)

/**
 * RETURN_RESULT_OR_FAILURE is used in functions with return type Object (such
 * as "RUNTIME_FUNCTION(...) {...}" or "BUILTIN(...) {...}") to return either
 * the contents of a MaybeHandle<X>, or the "exception" sentinel value.
 * Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_RESULT_OR_FAILURE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Handle<X> or Maybe<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_RESULT_OR_FAILURE(isolate, call)      \
  do {                                               \
    Handle<Object> __result__;                       \
    Isolate* __isolate__ = (isolate);                \
    if (!(call).ToHandle(&__result__)) {             \
      DCHECK(__isolate__->has_pending_exception());  \
      return ReadOnlyRoots(__isolate__).exception(); \
    }                                                \
    DCHECK(!__isolate__->has_pending_exception());   \
    return *__result__;                              \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                              \
    if (!(call).ToHandle(&dst)) {                                   \
      DCHECK((isolate)->has_pending_exception());                   \
      return value;                                                 \
    }                                                               \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)                \
  do {                                                                        \
    auto* __isolate__ = (isolate);                                            \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,                  \
                                     ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
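
// A minimal usage sketch for ASSIGN_RETURN_ON_EXCEPTION (the surrounding
// function is hypothetical): assign the result of a throwing call into a
// handle, or propagate the pending exception as an empty MaybeHandle<String>.
//
//   MaybeHandle<String> HypotheticalToString(Isolate* isolate,
//                                            Handle<Object> input) {
//     Handle<String> result;
//     ASSIGN_RETURN_ON_EXCEPTION(
//         isolate, result, Object::ToString(isolate, input), String);
//     return result;
//   }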

#define THROW_NEW_ERROR(isolate, call, T)                                \
  do {                                                                   \
    auto* __isolate__ = (isolate);                                       \
    return __isolate__->template Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    auto* __isolate__ = (isolate);                            \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
  do {                                                     \
    auto* __isolate__ = (isolate);                         \
    __isolate__->Throw(*__isolate__->factory()->call);     \
    return value;                                          \
  } while (false)
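
// A minimal usage sketch: the "call" argument is a factory method invocation
// that constructs the error object, e.g. NewTypeError. Inside a function
// returning MaybeHandle<X> (names below are hypothetical):
//
//   MaybeHandle<Object> HypotheticalOp(Isolate* isolate) {
//     if (invalid_input) {
//       THROW_NEW_ERROR(
//           isolate, NewTypeError(MessageTemplate::kInvalidArgument), Object);
//     }
//     ...
//   }
//
// In a runtime function or builtin returning Object, use
// THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(...)) instead.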

/**
 * RETURN_ON_EXCEPTION_VALUE conditionally returns the given value when the
 * given MaybeHandle is empty. It is typically used in functions with return
 * type Maybe<X> or Handle<X>. Example usage:
 *
 * Handle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Handle<X>());
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * Maybe<bool> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Nothing<bool>());
 *   // code to handle the non-exception case
 *   return Just(true);
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Object, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 */
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                  \
    if ((call).is_null()) {                             \
      DCHECK((isolate)->has_pending_exception());       \
      return value;                                     \
    }                                                   \
  } while (false)

/**
 * RETURN_FAILURE_ON_EXCEPTION conditionally returns the "exception" sentinel if
 * the given MaybeHandle is empty; so it can only be used in functions with
 * return type Object, such as RUNTIME_FUNCTION(...) {...} or BUILTIN(...)
 * {...}. Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_FAILURE_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Maybe<X> or Handle<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)                     \
  do {                                                                 \
    Isolate* __isolate__ = (isolate);                                  \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                       \
                              ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

/**
 * RETURN_ON_EXCEPTION conditionally returns an empty MaybeHandle<T> if the
 * given MaybeHandle is empty. Use it to return immediately from a function with
 * return type MaybeHandle when an exception was thrown. Example usage:
 *
 * MaybeHandle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleY(...),
 *       X);
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * If inside a function with return type Object, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 * If inside a function with return type Maybe<X> or Handle<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_ON_EXCEPTION(isolate, call, T) \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())

#define RETURN_FAILURE(isolate, should_throw, call) \
  do {                                              \
    if ((should_throw) == kDontThrow) {             \
      return Just(false);                           \
    } else {                                        \
      isolate->Throw(*isolate->factory()->call);    \
      return Nothing<bool>();                       \
    }                                               \
  } while (false)
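
// A minimal usage sketch (hypothetical property operation): should_throw
// selects between a soft failure (Just(false)) and a thrown error
// (Nothing<bool>()), matching the Maybe<bool> protocol of such operations.
//
//   Maybe<bool> HypotheticalWrite(Isolate* isolate, ShouldThrow should_throw) {
//     if (read_only) {
//       RETURN_FAILURE(
//           isolate, should_throw,
//           NewTypeError(MessageTemplate::kStrictReadOnlyProperty));
//     }
//     return Just(true);
//   }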

#define MAYBE_RETURN(call, value)         \
  do {                                    \
    if ((call).IsNothing()) return value; \
  } while (false)

#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
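
// A minimal usage sketch (callee names are hypothetical): propagate a Nothing
// result from a Maybe-returning call.
//
//   MaybeHandle<Object> HypotheticalGet(Isolate* isolate) {
//     Maybe<bool> found = HypotheticalHasProperty(isolate);
//     MAYBE_RETURN_NULL(found);
//     // found.FromJust() is safe to use from here on.
//     ...
//   }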

#define MAYBE_ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                    \
    if (!(call).To(&dst)) {                                               \
      DCHECK((isolate)->has_pending_exception());                         \
      return value;                                                       \
    }                                                                     \
  } while (false)

#define MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
  do {                                                               \
    Isolate* __isolate__ = (isolate);                                \
    if (!(call).To(&dst)) {                                          \
      DCHECK(__isolate__->has_pending_exception());                  \
      return ReadOnlyRoots(__isolate__).exception();                 \
    }                                                                \
  } while (false)

#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)

#define WHILE_WITH_HANDLE_SCOPE(isolate, limit_check, body)                  \
  do {                                                                       \
    Isolate* for_with_handle_isolate = isolate;                              \
    while (limit_check) {                                                    \
      HandleScope loop_scope(for_with_handle_isolate);                       \
      for (int for_with_handle_it = 0;                                       \
           limit_check && for_with_handle_it < 1024; ++for_with_handle_it) { \
        body                                                                 \
      }                                                                      \
    }                                                                        \
  } while (false)
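
// A minimal usage sketch: FOR_WITH_HANDLE_SCOPE opens a fresh HandleScope
// every 1024 iterations so that a long-running loop does not accumulate an
// unbounded number of handles (the body shown is illustrative):
//
//   FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < n, i++, {
//     Handle<Object> element = HypotheticalGetElement(isolate, i);
//     HypotheticalProcess(element);
//   });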

#define FIELD_ACCESSOR(type, name)                \
  inline void set_##name(type v) { name##_ = v; } \
  inline type name() const { return name##_; }
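
// For example, FIELD_ACCESSOR(uintptr_t, stack_limit) expands to a setter and
// a getter for a uintptr_t stack_limit_ member:
//
//   inline void set_stack_limit(uintptr_t v) { stack_limit_ = v; }
//   inline uintptr_t stack_limit() const { return stack_limit_; }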

// Controls for manual embedded blob lifecycle management, used by tests and
// mksnapshot.
V8_EXPORT_PRIVATE void DisableEmbeddedBlobRefcounting();
V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();

#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, kCodeKindCount)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

using DebugObjectCache = std::vector<Handle<HeapObject>>;

#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback,        \
    nullptr)                                                                  \
  V(ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback2,      \
    nullptr)                                                                  \
  V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr)   \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
  V(SharedArrayBufferConstructorEnabledCallback,                              \
    sharedarraybuffer_constructor_enabled_callback, nullptr)                  \
  V(WasmStreamingCallback, wasm_streaming_callback, nullptr)                  \
  V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr)        \
  V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr)             \
  V(WasmExceptionsEnabledCallback, wasm_exceptions_enabled_callback, nullptr) \
  V(WasmDynamicTieringEnabledCallback, wasm_dynamic_tiering_enabled_callback, \
    nullptr)                                                                  \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object, string_stream_current_security_token, Object())                   \
  V(const intptr_t*, api_external_references, nullptr)                        \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(MicrotaskQueue*, default_microtask_queue, nullptr)                        \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  V(int, external_script_source_size, 0)                                      \
  /* Number of CPU profilers running on the isolate. */                       \
  V(size_t, num_cpu_profilers, 0)                                             \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints)  \
  V(debug::TypeProfileMode, type_profile_mode, debug::TypeProfileMode::kNone) \
  V(bool, disable_bytecode_flushing, false)                                   \
  V(int, last_console_context_id, 0)                                          \
  V(v8_inspector::V8Inspector*, inspector, nullptr)                           \
  V(bool, next_v8_call_is_safe_for_termination, false)                        \
  V(bool, only_terminate_in_safe_scope, false)                                \
  V(int, embedder_wrapper_type_index, -1)                                     \
  V(int, embedder_wrapper_object_index, -1)                                   \
  V(compiler::NodeObserver*, node_observer, nullptr)                          \
  /* Used in combination with --script-run-delay-once */                      \
  V(bool, did_run_script_delay, false)                                        \
  V(bool, javascript_execution_assert, true)                                  \
  V(bool, javascript_execution_throws, true)                                  \
  V(bool, javascript_execution_dump, true)                                    \
  V(bool, deoptimization_assert, true)                                        \
  V(bool, compilation_assert, true)                                           \
  V(bool, no_exception_assert, true)

#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                         \
  inline void set_##name(type v) { thread_local_top()->name##_ = v; } \
  inline type name() const { return thread_local_top()->name##_; }

#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top()->name##_; }
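
// For example, THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
// expands to accessors that read and write
// thread_local_top()->external_caught_exception_:
//
//   inline void set_external_caught_exception(bool v) {
//     thread_local_top()->external_caught_exception_ = v;
//   }
//   inline bool external_caught_exception() const {
//     return thread_local_top()->external_caught_exception_;
//   }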

// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};

class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;

 public:
  Isolate(const Isolate&) = delete;
  Isolate& operator=(const Isolate&) = delete;

  using HandleScopeType = HandleScope;
  void* operator new(size_t) = delete;
  void operator delete(void*) = delete;

  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(nullptr)
#if USE_SIMULATOR
          ,
          simulator_(nullptr)
#endif
    {
    }
    ~PerIsolateThreadData();
    PerIsolateThreadData(const PerIsolateThreadData&) = delete;
    PerIsolateThreadData& operator=(const PerIsolateThreadData&) = delete;
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_ == thread_id;
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;
  };

  static void InitializeOncePerProcess();
  static void DisposeOncePerProcess();

  // Creates an Isolate object. Must be used instead of constructing an
  // Isolate with the new operator.
  static Isolate* New();

  // Creates a new shared Isolate object.
  static Isolate* NewShared(const v8::Isolate::CreateParams& params);

  // Deletes an Isolate object. Must be used instead of the delete operator.
  // Destroys non-default isolates. Sets the default isolate into a
  // "has_been_disposed" state rather than destroying it, for legacy API
  // reasons.
  static void Delete(Isolate* isolate);

  void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts,
                                  ReadOnlyHeap* ro_heap);
  void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }

  // Page allocator that must be used for allocating V8 heap pages.
  v8::PageAllocator* page_allocator() const;

  // Returns the PerIsolateThreadData for the current thread (or nullptr if one
  // is not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running or nullptr.
  V8_INLINE static Isolate* TryGetCurrent() {
    DCHECK_EQ(true, isolate_key_created_.load(std::memory_order_relaxed));
    return reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
  }

  // Returns the isolate inside which the current thread is running.
  V8_INLINE static Isolate* Current() {
    Isolate* isolate = TryGetCurrent();
    DCHECK_NOT_NULL(isolate);
    return isolate;
  }

  bool IsCurrent() const { return this == TryGetCurrent(); }

  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();
  bool InitializeCounters();  // Returns false if already initialized.

  bool InitWithoutSnapshot();
  bool InitWithSnapshot(SnapshotData* startup_snapshot_data,
                        SnapshotData* read_only_snapshot_data,
                        SnapshotData* shared_heap_snapshot_data,
                        bool can_rehash);

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != nullptr; }

  void ReleaseSharedPtrs();

  void ClearSerializerData();

  bool LogObjectRelocation();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Exit();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

  // Find the PerThread for the given (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);

  // Discard the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, this is a no-op.
  void DiscardPerThreadDataForThisThread();

  // Mutex for serializing access to break control structures.
  base::RecursiveMutex* break_access() { return &break_access_; }

  // Shared mutex for allowing thread-safe concurrent reads of FeedbackVectors.
  base::SharedMutex* feedback_vector_access() {
    return &feedback_vector_access_;
  }

  // Shared mutex for allowing thread-safe concurrent reads of
  // InternalizedStrings.
  base::SharedMutex* internalized_string_access() {
    return &internalized_string_access_;
  }

  // Shared mutex for allowing thread-safe concurrent reads of TransitionArrays
  // of kind kFullTransitionArray.
  base::SharedMutex* full_transition_array_access() {
    return &full_transition_array_access_;
  }

  // Shared mutex for allowing thread-safe concurrent reads of
  // SharedFunctionInfos.
  base::SharedMutex* shared_function_info_access() {
    return &shared_function_info_access_;
  }

  // Protects (most) map update operations, see also MapUpdater.
  base::SharedMutex* map_updater_access() { return &map_updater_access_; }

  // Protects JSObject boilerplate migrations (i.e. calls to MigrateInstance on
  // boilerplate objects; elements kind transitions are *not* protected).
  // Note this lock interacts with `map_updater_access` as follows:
  //
  // - boilerplate migrations may trigger map updates.
  // - if so, `boilerplate_migration_access` is locked before
  //   `map_updater_access`.
  // - background threads must use the same lock order to avoid deadlocks.
  base::SharedMutex* boilerplate_migration_access() {
    return &boilerplate_migration_access_;
  }

  // The isolate's string table.
  StringTable* string_table() const { return string_table_.get(); }

  Address get_address_from_id(IsolateAddressId id);

  // Access to top context (where the current function object was created).
  Context context() const { return thread_local_top()->context_; }
  inline void set_context(Context context);
  Context* context_address() { return &thread_local_top()->context_; }

  // Access to current thread id.
  inline void set_thread_id(ThreadId id) {
    thread_local_top()->thread_id_.store(id, std::memory_order_relaxed);
  }
  inline ThreadId thread_id() const {
    return thread_local_top()->thread_id_.load(std::memory_order_relaxed);
  }

  void InstallConditionalFeatures(Handle<Context> context);

  bool IsSharedArrayBufferConstructorEnabled(Handle<Context> context);

  bool IsWasmSimdEnabled(Handle<Context> context);
  bool AreWasmExceptionsEnabled(Handle<Context> context);
  bool IsWasmDynamicTieringEnabled();

  THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
  THREAD_LOCAL_TOP_ADDRESS(uintptr_t, num_frames_above_pending_handler)

  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)

  v8::TryCatch* try_catch_handler() {
    return thread_local_top()->try_catch_handler_;
  }

  THREAD_LOCAL_TOP_ADDRESS(bool, external_caught_exception)

  // Interface to pending exception.
  THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
  inline Object pending_exception();
  inline void set_pending_exception(Object exception_obj);
  inline void clear_pending_exception();
  inline bool has_pending_exception();

  THREAD_LOCAL_TOP_ADDRESS(Object, pending_message)
  inline void clear_pending_message();
  inline Object pending_message();
  inline bool has_pending_message();
  inline void set_pending_message(Object message_obj);

  THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)
  inline Object scheduled_exception();
  inline bool has_scheduled_exception();
  inline void clear_scheduled_exception();
  inline void set_scheduled_exception(Object exception);

  enum class ExceptionHandlerType {
    kJavaScriptHandler,
    kExternalTryCatch,
    kNone
  };

  ExceptionHandlerType TopExceptionHandlerType(Object exception);

  inline bool is_catchable_by_javascript(Object exception);
  inline bool is_catchable_by_wasm(Object exception);

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
  Address c_function() { return thread_local_top()->c_function_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top()->c_entry_fp_;
  }
  static uint32_t c_entry_fp_offset() {
    return static_cast<uint32_t>(
        OFFSET_OF(Isolate, thread_local_top()->c_entry_fp_) -
        isolate_root_bias());
  }
  inline Address* handler_address() { return &thread_local_top()->handler_; }
  inline Address* c_function_address() {
    return &thread_local_top()->c_function_;
  }

#if defined(DEBUG) || defined(VERIFY_HEAP)
  // Count the number of active deserializers, so that the heap verifier knows
  // whether there is currently an active deserialization happening.
  //
  // This is needed as the verifier currently doesn't support verifying objects
  // which are partially deserialized.
  //
  // TODO(leszeks): Make the verifier a bit more deserialization compatible.
  void RegisterDeserializerStarted() { ++num_active_deserializers_; }
  void RegisterDeserializerFinished() {
    CHECK_GE(--num_active_deserializers_, 0);
  }
  bool has_active_deserializer() const {
    return num_active_deserializers_.load(std::memory_order_acquire) > 0;
  }
#else
  void RegisterDeserializerStarted() {}
  void RegisterDeserializerFinished() {}
  bool has_active_deserializer() const { UNREACHABLE(); }
#endif

  // Bottom JS entry.
  Address js_entry_sp() { return thread_local_top()->js_entry_sp_; }
  inline Address* js_entry_sp_address() {
    return &thread_local_top()->js_entry_sp_;
  }

  std::vector<MemoryRange>* GetCodePages() const;

  void SetCodePages(std::vector<MemoryRange>* new_code_pages);

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  inline Handle<JSGlobalObject> global_object();

  // Returns the global proxy object of the current context.
  inline Handle<JSGlobalProxy> global_proxy();

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top()->Free(); }

  // This method is called by the API after operations that may throw
  // exceptions.  If an exception was thrown and not handled by an external
  // handler, the exception is scheduled to be rethrown when we return to
  // running JavaScript code.  If an exception is scheduled, true is returned.
  bool OptionalRescheduleException(bool clear_exception);

  // Push and pop a promise and the current try-catch handler.
  void PushPromise(Handle<JSObject> promise);
  void PopPromise();
  bool IsPromiseStackEmpty() const;

  // Returns the relevant Promise that a throw/rejection pertains to, based
  // on the contents of the Promise stack.
  Handle<Object> GetPromiseOnStackOnThrow();

  // Heuristically guesses whether a Promise is handled by a user catch
  // handler.
  bool PromiseHasUserDefinedRejectHandler(Handle<JSPromise> promise);

  class V8_NODISCARD ExceptionScope {
   public:
    // The scope can currently only be used for regular exceptions,
    // not termination exceptions.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };

  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture, int frame_limit, StackTrace::StackTraceOptions options);
  bool get_capture_stack_trace_for_uncaught_exceptions() const;

  void SetAbortOnUncaughtExceptionCallback(
      v8::Isolate::AbortOnUncaughtExceptionCallback callback);

  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
  void PrintCurrentStackTrace(std::ostream& out);
  void PrintStack(StringStream* accumulator,
                  PrintStackMode mode = kPrintStackVerbose);
  void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
  Handle<String> StackTraceString();
  // Stores a stack trace in a stack-allocated temporary buffer which will
  // end up in the minidump for debugging purposes.
  V8_NOINLINE void PushStackTraceAndDie(void* ptr1 = nullptr,
                                        void* ptr2 = nullptr,
                                        void* ptr3 = nullptr,
                                        void* ptr4 = nullptr);
  // Similar to the above but without collecting the stack trace.
  V8_NOINLINE void PushParamsAndDie(void* ptr1 = nullptr, void* ptr2 = nullptr,
                                    void* ptr3 = nullptr, void* ptr4 = nullptr,
                                    void* ptr5 = nullptr, void* ptr6 = nullptr);
  Handle<FixedArray> CaptureDetailedStackTrace(
      int limit, StackTrace::StackTraceOptions options);
  MaybeHandle<JSObject> CaptureAndSetErrorStack(Handle<JSObject> error_object,
                                                FrameSkipMode mode,
                                                Handle<Object> caller);
  Handle<FixedArray> GetDetailedStackTrace(Handle<JSReceiver> error_object);
  Handle<FixedArray> GetSimpleStackTrace(Handle<JSReceiver> error_object);
  // Walks the JS stack to find the first frame with a script name or
  // source URL. The inspected frames are the same as for the detailed stack
  // trace.
  Handle<String> CurrentScriptNameOrSourceURL();

  Address GetAbstractPC(int* line, int* column);

  // Returns whether the given context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(Handle<JSObject> receiver);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Object Throw(Object exception) { return ThrowInternal(exception, nullptr); }
  Object ThrowAt(Handle<JSObject> exception, MessageLocation* location);
  Object ThrowIllegalOperation();

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(Handle<Object> exception) {
    Throw(*exception);
    return MaybeHandle<T>();
  }

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> ThrowAt(Handle<JSObject> exception,
                                               MessageLocation* location) {
    ThrowAt(exception, location);
    return MaybeHandle<T>();
  }

  void FatalProcessOutOfHeapMemory(const char* location) {
    heap()->FatalProcessOutOfMemory(location);
  }

  void set_console_delegate(debug::ConsoleDelegate* delegate) {
    console_delegate_ = delegate;
  }
  debug::ConsoleDelegate* console_delegate() { return console_delegate_; }

  void set_async_event_delegate(debug::AsyncEventDelegate* delegate) {
    async_event_delegate_ = delegate;
    PromiseHookStateUpdated();
  }

  // Async function and promise instrumentation support.
  void OnAsyncFunctionSuspended(Handle<JSPromise> promise,
                                Handle<JSPromise> parent);
  void OnPromiseThen(Handle<JSPromise> promise);
  void OnPromiseBefore(Handle<JSPromise> promise);
  void OnPromiseAfter(Handle<JSPromise> promise);
  void OnTerminationDuringRunMicrotasks();

  // Re-throw an exception.  This involves no error reporting since error
  // reporting was handled when the exception was thrown originally.
  // The first overload doesn't set the corresponding pending message, which
  // has to be set separately or be guaranteed to not have changed.
  Object ReThrow(Object exception);
  Object ReThrow(Object exception, Object message);

  // Find the correct handler for the current pending exception. This also
  // clears and returns the current pending exception.
  Object UnwindAndFindHandler();

  // Tries to predict whether an exception will be caught. Note that this can
  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  CatchType PredictExceptionCatcher();

  void ScheduleThrow(Object exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromSimpleStackTrace(MessageLocation* target,
                                           Handle<Object> exception);
  bool ComputeLocationFromDetailedStackTrace(MessageLocation* target,
                                             Handle<Object> exception);

  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);
  Handle<JSMessageObject> CreateMessageOrAbort(Handle<Object> exception,
                                               MessageLocation* location);
  // Similar to Isolate::CreateMessage but DOESN'T inspect the JS stack and
  // only looks at the "detailed stack trace" as the "simple stack trace" might
  // have already been stringified.
  Handle<JSMessageObject> CreateMessageFromException(Handle<Object> exception);

  // Out of resource exception helpers.
  Object StackOverflow();
  Object TerminateExecution();
  void CancelTerminateExecution();

  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration
  void Iterate(RootVisitor* v);
  void Iterate(RootVisitor* v, ThreadLocalTop* t);
  char* Iterate(RootVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<NativeContext> native_context();
  inline NativeContext raw_native_context();

  Handle<Context> GetIncumbentContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                \
  inline type name() const {                                     \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return name##_;                                              \
  }                                                              \
  inline void set_##name(type value) {                           \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    name##_ = value;                                             \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR
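
  // For example, the ISOLATE_INIT_LIST entry
  // V(FatalErrorCallback, exception_behavior, nullptr) expands through
  // GLOBAL_ACCESSOR into (debug-offset checks omitted):
  //
  //   inline FatalErrorCallback exception_behavior() const {
  //     return exception_behavior_;
  //   }
  //   inline void set_exception_behavior(FatalErrorCallback value) {
  //     exception_behavior_ = value;
  //   }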

  void SetDetailedSourcePositionsForProfiling(bool value) {
    if (value) {
      CollectSourcePositionsForAllBytecodeArrays();
    }
    detailed_source_positions_for_profiling_ = value;
  }

  bool detailed_source_positions_for_profiling() const {
    return detailed_source_positions_for_profiling_;
  }

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                \
  inline type* name() {                                          \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return &(name##_)[0];                                        \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  // Use for updating counters on a foreground thread.
  Counters* counters() { return async_counters().get(); }
  // Use for updating counters on a background thread.
  const std::shared_ptr<Counters>& async_counters() {
    // Make sure InitializeCounters() has been called.
    DCHECK_NOT_NULL(async_counters_.get());
    return async_counters_;
  }
  const std::shared_ptr<metrics::Recorder>& metrics_recorder() {
    return metrics_recorder_;
  }
  TieringManager* tiering_manager() { return tiering_manager_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK_NOT_NULL(logger_);
    return logger_;
  }
  StackGuard* stack_guard() { return isolate_data()->stack_guard(); }
  Heap* heap() { return &heap_; }
  const Heap* heap() const { return &heap_; }
  ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
  static Isolate* FromHeap(Heap* heap) {
    return reinterpret_cast<Isolate*>(reinterpret_cast<Address>(heap) -
                                      OFFSET_OF(Isolate, heap_));
  }

  const IsolateData* isolate_data() const { return &isolate_data_; }
  IsolateData* isolate_data() { return &isolate_data_; }

  // When pointer compression is on, this is the base address of the pointer
  // compression cage, and the kPtrComprCageBaseRegister is set to this
  // value. When pointer compression is off, this is always kNullAddress.
  Address cage_base() const {
    DCHECK_IMPLIES(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL &&
                       !COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL,
                   isolate_data()->cage_base() == kNullAddress);
    return isolate_data()->cage_base();
  }

  // When pointer compression and external code space are on, this is the base
  // address of the cage where the code space is allocated. Otherwise, it
  // defaults to cage_base().
  Address code_cage_base() const {
#ifdef V8_EXTERNAL_CODE_SPACE
    return code_cage_base_;
#else
    return cage_base();
#endif  // V8_EXTERNAL_CODE_SPACE
  }

  // When pointer compression is on, the PtrComprCage used by this
  // Isolate. Otherwise nullptr.
  VirtualMemoryCage* GetPtrComprCage() {
    return isolate_allocator_->GetPtrComprCage();
  }
  const VirtualMemoryCage* GetPtrComprCage() const {
    return isolate_allocator_->GetPtrComprCage();
  }
  VirtualMemoryCage* GetPtrComprCodeCageForTesting();

  // Generated code can embed this address to get access to the isolate-specific
  // data (for example, roots, external references, builtins, etc.).
  // The kRootRegister is set to this value.
  Address isolate_root() const { return isolate_data()->isolate_root(); }
  static size_t isolate_root_bias() {
    return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
  }
  static Isolate* FromRootAddress(Address isolate_root) {
    return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
  }

  RootsTable& roots_table() { return isolate_data()->roots(); }
  const RootsTable& roots_table() const { return isolate_data()->roots(); }

  // A sub-region of the Isolate object that has a "predictable" layout,
  // depending only on the pointer size; this guarantees that there will be no
  // compatibility issues when the snapshot generator and the actual V8 code
  // are built with different compilers.
  // Thus, kRootRegister may be used to address any location that falls into
  // this region.
  // See IsolateData::AssertPredictableLayout() for details.
  base::AddressRegion root_register_addressable_region() const {
    return base::AddressRegion(reinterpret_cast<Address>(&isolate_data_),
                               sizeof(IsolateData));
  }

  Object root(RootIndex index) const { return Object(roots_table()[index]); }

  Handle<Object> root_handle(RootIndex index) {
    return Handle<Object>(&roots_table()[index]);
  }

  ExternalReferenceTable* external_reference_table() {
    DCHECK(isolate_data()->external_reference_table()->is_initialized());
    return isolate_data()->external_reference_table();
  }

  Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
  V8_INLINE Address* builtin_table() { return isolate_data_.builtin_table(); }
  V8_INLINE Address* builtin_tier0_table() {
    return isolate_data_.builtin_tier0_table();
  }

  bool IsBuiltinTableHandleLocation(Address* handle_location);

  StubCache* load_stub_cache() const { return load_stub_cache_; }
  StubCache* store_stub_cache() const { return store_stub_cache_; }
  Deoptimizer* GetAndClearCurrentDeoptimizer() {
    Deoptimizer* result = current_deoptimizer_;
    CHECK_NOT_NULL(result);
    current_deoptimizer_ = nullptr;
    return result;
  }
  void set_current_deoptimizer(Deoptimizer* deoptimizer) {
    DCHECK_NULL(current_deoptimizer_);
    DCHECK_NOT_NULL(deoptimizer);
    current_deoptimizer_ = deoptimizer;
  }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  void InitializeThreadLocal();
  ThreadLocalTop* thread_local_top() {
    return &isolate_data_.thread_local_top_;
  }
  ThreadLocalTop const* thread_local_top() const {
    return &isolate_data_.thread_local_top_;
  }

  static uint32_t thread_in_wasm_flag_address_offset() {
    // For WebAssembly trap handlers there is a flag in thread-local storage
    // which indicates that the executing thread executes WebAssembly code. To
    // access this flag directly from generated code, we store a pointer to the
    // flag in ThreadLocalTop in thread_in_wasm_flag_address_. This function
    // here returns the offset of that member from {isolate_root()}.
    return static_cast<uint32_t>(
        OFFSET_OF(Isolate, thread_local_top()->thread_in_wasm_flag_address_) -
        isolate_root_bias());
  }
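
  // Illustrative sketch: generated code with kRootRegister pointing at
  // isolate_root() can locate the pointer to the thread-in-wasm flag with a
  // single register-relative load. The C++ equivalent would be:
  //
  //   Address* flag_address_slot = reinterpret_cast<Address*>(
  //       isolate->isolate_root() +
  //       Isolate::thread_in_wasm_flag_address_offset());
  //   // *flag_address_slot is thread_local_top()->thread_in_wasm_flag_address_,
  //   // i.e. the pointer to the flag itself.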

  THREAD_LOCAL_TOP_ADDRESS(Address, thread_in_wasm_flag_address)

  MaterializedObjectStore* materialized_object_store() const {
    return materialized_object_store_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() const {
    return descriptor_lookup_cache_;
  }

  V8_INLINE HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() const {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() const { return unicode_cache_; }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() const { return global_handles_; }

  EternalHandles* eternal_handles() const { return eternal_handles_; }

  ThreadManager* thread_manager() const { return thread_manager_; }

  bigint::Processor* bigint_processor() { return bigint_processor_; }

#ifndef V8_INTL_SUPPORT
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
  regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }
#endif  // !V8_INTL_SUPPORT

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  RegExpStack* regexp_stack() const { return regexp_stack_; }

  size_t total_regexp_code_generated() const {
    return total_regexp_code_generated_;
  }
  void IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code);

  std::vector<int>* regexp_indices() { return &regexp_indices_; }

  Debug* debug() const { return debug_; }

  void* is_profiling_address() { return &is_profiling_; }

  bool is_profiling() const {
    return is_profiling_.load(std::memory_order_relaxed);
  }

  void SetIsProfiling(bool enabled) {
    if (enabled) {
      CollectSourcePositionsForAllBytecodeArrays();
    }
    is_profiling_.store(enabled, std::memory_order_relaxed);
  }

  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  static size_t non_disposed_isolates() { return non_disposed_isolates_; }
#endif

  v8::internal::Factory* factory() {
    // Upcast to the privately inherited base class using a C-style cast to
    // avoid undefined behavior (static_cast cannot cast across private bases).
    return (v8::internal::Factory*)this;
  }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
  THREAD_LOCAL_TOP_ACCESSOR(EmbedderState*, current_embedder_state)

  void SetData(uint32_t slot, void* data) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    isolate_data_.embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) const {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    return isolate_data_.embedder_data_[slot];
  }
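
  // Illustrative sketch: embedders can stash per-isolate state in these
  // slots and read it back later, e.g. from a callback. The slot index and
  // the MyEmbedderState type are hypothetical:
  //
  //   auto* state = new MyEmbedderState();
  //   isolate->SetData(0, state);
  //   ...
  //   auto* restored = static_cast<MyEmbedderState*>(isolate->GetData(0));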

  bool serializer_enabled() const { return serializer_enabled_; }

  void enable_serializer() { serializer_enabled_ = true; }

  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() const { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool NeedsDetailedOptimizedCodeLineInfo() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_binary_code_coverage() const {
    return is_precise_binary_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_count_code_coverage() const {
    return is_precise_count_code_coverage() || is_block_count_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfileMode::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list when neither code coverage nor type profile is needed
  // anymore. This keeps many feedback vectors alive, but code coverage and
  // type profile are used for debugging only, so the increase in memory
  // usage is expected.
  void SetFeedbackVectorsForProfilingTools(Object value);

  void MaybeInitializeVectorListFromHeap();

  double time_millis_since_init() const {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() const { return date_cache_; }

  void set_date_cache(DateCache* date_cache);

#ifdef V8_INTL_SUPPORT

  const std::string& DefaultLocale();

  void ResetDefaultLocale();

  void set_default_locale(const std::string& locale) {
    DCHECK_EQ(default_locale_.length(), 0);
    default_locale_ = locale;
  }

  enum class ICUObjectCacheType {
    kDefaultCollator,
    kDefaultNumberFormat,
    kDefaultSimpleDateFormat,
    kDefaultSimpleDateFormatForTime,
    kDefaultSimpleDateFormatForDate
  };
  static constexpr int kICUObjectCacheTypeCount = 5;

  icu::UMemory* get_cached_icu_object(ICUObjectCacheType cache_type,
                                      Handle<Object> locales);
  void set_icu_object_in_cache(ICUObjectCacheType cache_type,
                               Handle<Object> locales,
                               std::shared_ptr<icu::UMemory> obj);
  void clear_cached_icu_object(ICUObjectCacheType cache_type);
  void clear_cached_icu_objects();

#endif  // V8_INTL_SUPPORT

  enum class KnownPrototype { kNone, kObject, kArray, kString };

  KnownPrototype IsArrayOrObjectOrStringPrototype(Object object);

  // On intent to set an element in object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to the prototype chain
  // between Array and Object fire notifications.
  void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
  void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }

  // Returns true if array is the initial array prototype in any native context.
  inline bool IsAnyInitialArrayPrototype(JSArray array);

  std::unique_ptr<PersistentHandles> NewPersistentHandles();

  PersistentHandlesList* persistent_handles_list() const {
    return persistent_handles_list_.get();
  }

#ifdef DEBUG
  bool IsDeferredHandle(Address* location);
#endif  // DEBUG

  baseline::BaselineBatchCompiler* baseline_batch_compiler() const {
    DCHECK_NOT_NULL(baseline_batch_compiler_);
    return baseline_batch_compiler_;
  }

#ifdef V8_ENABLE_MAGLEV
  maglev::MaglevConcurrentDispatcher* maglev_concurrent_dispatcher() {
    DCHECK_NOT_NULL(maglev_concurrent_dispatcher_);
    return maglev_concurrent_dispatcher_;
  }
#endif  // V8_ENABLE_MAGLEV

  bool concurrent_recompilation_enabled() {
    // The dispatcher thread is only available when the flag is enabled.
    DCHECK(optimizing_compile_dispatcher_ == nullptr ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != nullptr;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    DCHECK_NOT_NULL(optimizing_compile_dispatcher_);
    return optimizing_compile_dispatcher_;
  }
  // Flushes all pending concurrent optimization jobs from the optimizing
  // compile dispatcher's queue.
  void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);

  int id() const { return id_; }

  bool was_locker_ever_used() const {
    return was_locker_ever_used_.load(std::memory_order_relaxed);
  }
  void set_was_locker_ever_used() {
    was_locker_ever_used_.store(true, std::memory_order_relaxed);
  }

  CompilationStatistics* GetTurboStatistics();
  CodeTracer* GetCodeTracer();

  void DumpAndResetStats();

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  void set_force_slow_path(bool v) { force_slow_path_ = v; }
  bool force_slow_path() const { return force_slow_path_; }
  bool* force_slow_path_address() { return &force_slow_path_; }

  DebugInfo::ExecutionMode* debug_execution_mode_address() {
    return &debug_execution_mode_;
  }

  base::RandomNumberGenerator* random_number_generator();

  base::RandomNumberGenerator* fuzzer_rng();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code FindCodeObject(Address a);

  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  // https://github.com/tc39/proposal-top-level-await/pull/159
  // TODO(syg): Update to actual spec link once merged.
  //
  // According to the spec, modules that depend on async modules (i.e. modules
  // with top-level await) must be evaluated in the order in which their
  // [[AsyncEvaluating]] flags were set to true. V8 tracks this global total
  // order with next_module_async_evaluating_ordinal_. Each module that sets
  // its [[AsyncEvaluating]] to true grabs the next ordinal.
  unsigned NextModuleAsyncEvaluatingOrdinal() {
    unsigned ordinal = next_module_async_evaluating_ordinal_++;
    CHECK_LT(ordinal, kMaxModuleAsyncEvaluatingOrdinal);
    return ordinal;
  }
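
  // Illustrative sketch: ordinals are handed out in strictly increasing
  // order, so two modules can be compared by when their [[AsyncEvaluating]]
  // flags were set. For two hypothetical modules a and b, with a's flag set
  // first:
  //
  //   unsigned a_ordinal = isolate->NextModuleAsyncEvaluatingOrdinal();
  //   unsigned b_ordinal = isolate->NextModuleAsyncEvaluatingOrdinal();
  //   DCHECK_LT(a_ordinal, b_ordinal);  // a precedes b in the global order.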

  inline void DidFinishModuleAsyncEvaluation(unsigned ordinal);

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);
  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
    if (!thread_local_top()->CallDepthIsZero()) return;
    FireCallCompletedCallbackInternal(microtask_queue);
  }

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void SetTerminationOnExternalTryCatch();

  Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
                           bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);
  void CountUsage(v8::Isolate::UseCounterFeature feature, int count);

  static std::string GetTurboCfgFileName(Isolate* isolate);

  int GetNextScriptId();

#if V8_SFI_HAS_UNIQUE_ID
  int GetNextUniqueSharedFunctionInfoId() {
    int current_id = next_unique_sfi_id_.load(std::memory_order_relaxed);
    int next_id;
    do {
      if (current_id >= Smi::kMaxValue) {
        next_id = 0;
      } else {
        next_id = current_id + 1;
      }
    } while (!next_unique_sfi_id_.compare_exchange_weak(
        current_id, next_id, std::memory_order_relaxed));
    return current_id;
  }
#endif

#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
  void SetHasContextPromiseHooks(bool context_promise_hook) {
    promise_hook_flags_ = PromiseHookFields::HasContextPromiseHook::update(
        promise_hook_flags_, context_promise_hook);
    PromiseHookStateUpdated();
  }
#endif  // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS

  bool HasContextPromiseHooks() const {
    return PromiseHookFields::HasContextPromiseHook::decode(
        promise_hook_flags_);
  }

  Address promise_hook_flags_address() {
    return reinterpret_cast<Address>(&promise_hook_flags_);
  }

  Address promise_hook_address() {
    return reinterpret_cast<Address>(&promise_hook_);
  }

  Address async_event_delegate_address() {
    return reinterpret_cast<Address>(&async_event_delegate_);
  }

  Address javascript_execution_assert_address() {
    return reinterpret_cast<Address>(&javascript_execution_assert_);
  }

  Address handle_scope_implementer_address() {
    return reinterpret_cast<Address>(&handle_scope_implementer_);
  }

  void SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
                              void* data);
  void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
                              Handle<JSArrayBuffer> array_buffer,
                              size_t offset_in_bytes, int64_t value,
                              double timeout_in_ms,
                              AtomicsWaitWakeHandle* stop_handle);

  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);
  void RunAllPromiseHooks(PromiseHookType type, Handle<JSPromise> promise,
                          Handle<Object> parent);
  void UpdatePromiseHookProtector();
  void PromiseHookStateUpdated();

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  // Detach the environment from its outer global object.
  void DetachGlobal(Handle<Context> env);

  std::vector<Object>* startup_object_cache() { return &startup_object_cache_; }

  // When there is a shared space (i.e. when this is a client Isolate), the
  // shared heap object cache holds objects shared among Isolates. Otherwise
  // this object cache is per-Isolate, like the startup object cache.
  std::vector<Object>* shared_heap_object_cache() {
    if (shared_isolate()) return shared_isolate()->shared_heap_object_cache();
    return &shared_heap_object_cache_;
  }

  bool IsGeneratingEmbeddedBuiltins() const {
    return builtins_constants_table_builder() != nullptr;
  }

  BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
    return builtins_constants_table_builder_;
  }

  // Hashes bits of the Isolate that are relevant for embedded builtins. In
  // particular, the embedded blob requires builtin Code object layout and the
  // builtins constants table to remain unchanged from build-time.
  size_t HashIsolateForEmbeddedBlob();

  static const uint8_t* CurrentEmbeddedBlobCode();
  static uint32_t CurrentEmbeddedBlobCodeSize();
  static const uint8_t* CurrentEmbeddedBlobData();
  static uint32_t CurrentEmbeddedBlobDataSize();
  static bool CurrentEmbeddedBlobIsBinaryEmbedded();

  // These always return the same result as static methods above, but don't
  // access the global atomic variable (and thus *might be* slightly faster).
  const uint8_t* embedded_blob_code() const;
  uint32_t embedded_blob_code_size() const;
  const uint8_t* embedded_blob_data() const;
  uint32_t embedded_blob_data_size() const;

  // Returns true if the short builtin calls optimization is enabled for the
  // Isolate.
  bool is_short_builtin_calls_enabled() const {
    return V8_SHORT_BUILTIN_CALLS_BOOL && is_short_builtin_calls_enabled_;
  }

  // Returns a region from which it's possible to make pc-relative (short)
  // calls/jumps to embedded builtins, or an empty region if there's no
  // embedded blob or if pc-relative calls are not supported.
  static base::AddressRegion GetShortBuiltinsCallRegion();

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  void set_array_buffer_allocator_shared(
      std::shared_ptr<v8::ArrayBuffer::Allocator> allocator) {
    array_buffer_allocator_shared_ = std::move(allocator);
  }
  std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared()
      const {
    return array_buffer_allocator_shared_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  const AstStringConstants* ast_string_constants() const {
    return ast_string_constants_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  compiler::PerIsolateCompilerCache* compiler_cache() const {
    return compiler_cache_;
  }
  void set_compiler_utils(compiler::PerIsolateCompilerCache* cache,
                          Zone* zone) {
    compiler_cache_ = cache;
    compiler_zone_ = zone;
  }

  AccountingAllocator* allocator() { return allocator_; }

  LazyCompileDispatcher* lazy_compile_dispatcher() const {
    return lazy_compile_dispatcher_.get();
  }

  bool IsInAnyContext(Object object, uint32_t index);

  void ClearKeptObjects();

  void SetHostImportModuleDynamicallyCallback(
      HostImportModuleDynamicallyWithImportAssertionsCallback callback);
  void SetHostImportModuleDynamicallyCallback(
      HostImportModuleDynamicallyCallback callback);
  MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
      Handle<Script> referrer, Handle<Object> specifier,
      MaybeHandle<Object> maybe_import_assertions_argument);

  void SetHostInitializeImportMetaObjectCallback(
      HostInitializeImportMetaObjectCallback callback);
  MaybeHandle<JSObject> RunHostInitializeImportMetaObjectCallback(
      Handle<SourceTextModule> module);

  void SetHostCreateShadowRealmContextCallback(
      HostCreateShadowRealmContextCallback callback);
  MaybeHandle<NativeContext> RunHostCreateShadowRealmContextCallback();

  void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
    embedded_file_writer_ = writer;
  }

  int LookupOrAddExternallyCompiledFilename(const char* filename);
  const char* GetExternallyCompiledFilename(int index) const;
  int GetExternallyCompiledFilenameCount() const;
  // PrepareBuiltinSourcePositionMap is necessary in order to preserve the
  // builtin source positions before the corresponding code objects are
  // replaced with trampolines. Those source positions are used to
  // annotate the builtin blob with debugging information.
  void PrepareBuiltinSourcePositionMap();

  // Store the position of the labels that will be used in the list of allowed
  // return addresses.
  void PrepareBuiltinLabelInfoMap();

#if defined(V8_OS_WIN64)
  void SetBuiltinUnwindData(
      Builtin builtin,
      const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info);
#endif  // V8_OS_WIN64

  void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
  MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
                                                   Handle<JSObject> Error,
                                                   Handle<JSArray> sites);
  bool HasPrepareStackTraceCallback() const;

  void SetAddCrashKeyCallback(AddCrashKeyCallback callback);
  void AddCrashKey(CrashKeyId id, const std::string& value) {
    if (add_crash_key_callback_) {
      add_crash_key_callback_(id, value);
    }
  }

  void SetRAILMode(RAILMode rail_mode);

  RAILMode rail_mode() { return rail_mode_.load(); }

  void set_code_coverage_mode(debug::CoverageMode coverage_mode) {
    code_coverage_mode_.store(coverage_mode, std::memory_order_relaxed);
  }
  debug::CoverageMode code_coverage_mode() const {
    return code_coverage_mode_.load(std::memory_order_relaxed);
  }

  double LoadStartTimeMs();

  void UpdateLoadStartTime();

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  void EnableMemorySavingsMode() { memory_savings_mode_active_ = true; }

  void DisableMemorySavingsMode() { memory_savings_mode_active_ = false; }

  bool IsMemorySavingsModeActive() { return memory_savings_mode_active_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
  bool allow_atomics_wait() { return allow_atomics_wait_; }

  // Register a finalizer to be called at isolate teardown.
  void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  // Removes a previously-registered shared object finalizer.
  void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  size_t elements_deletion_counter() { return elements_deletion_counter_; }
  void set_elements_deletion_counter(size_t value) {
    elements_deletion_counter_ = value;
  }

#if V8_ENABLE_WEBASSEMBLY
  void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
#endif  // V8_ENABLE_WEBASSEMBLY

  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
    return top_backup_incumbent_scope_;
  }
  void set_top_backup_incumbent_scope(
      const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
    top_backup_incumbent_scope_ = top_backup_incumbent_scope;
  }

  void SetIdle(bool is_idle);

  // Changing various modes can cause differences in generated bytecode which
  // interferes with lazy source positions, so this should be called immediately
  // before such a mode change to ensure that this cannot happen.
  void CollectSourcePositionsForAllBytecodeArrays();

  void AddCodeMemoryChunk(MemoryChunk* chunk);
  void RemoveCodeMemoryChunk(MemoryChunk* chunk);
  void AddCodeRange(Address begin, size_t length_in_bytes);

  bool RequiresCodeRange() const;

  static Address load_from_stack_count_address(const char* function_name);
  static Address store_to_stack_count_address(const char* function_name);

  v8::metrics::Recorder::ContextId GetOrRegisterRecorderContextId(
      Handle<NativeContext> context);
  MaybeLocal<v8::Context> GetContextFromRecorderContextId(
      v8::metrics::Recorder::ContextId id);

  void UpdateLongTaskStats();
  v8::metrics::LongTaskStats* GetCurrentLongTaskStats();

  LocalIsolate* main_thread_local_isolate() {
    return main_thread_local_isolate_.get();
  }

  Isolate* AsIsolate() { return this; }
  LocalIsolate* AsLocalIsolate() { return main_thread_local_isolate(); }

  LocalHeap* main_thread_local_heap();
  LocalHeap* CurrentLocalHeap();

#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
  ExternalPointerTable& external_pointer_table() {
    return isolate_data_.external_pointer_table_;
  }

  const ExternalPointerTable& external_pointer_table() const {
    return isolate_data_.external_pointer_table_;
  }

  Address external_pointer_table_address() {
    return reinterpret_cast<Address>(&isolate_data_.external_pointer_table_);
  }
#endif

  struct PromiseHookFields {
    using HasContextPromiseHook = base::BitField<bool, 0, 1>;
    using HasIsolatePromiseHook = HasContextPromiseHook::Next<bool, 1>;
    using HasAsyncEventDelegate = HasIsolatePromiseHook::Next<bool, 1>;
    using IsDebugActive = HasAsyncEventDelegate::Next<bool, 1>;
  };
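
  // Illustrative sketch: PromiseHookFields packs several booleans into the
  // single promise_hook_flags_ word via base::BitField, e.g.
  //
  //   uint32_t flags = 0;
  //   flags = PromiseHookFields::HasIsolatePromiseHook::update(flags, true);
  //   DCHECK(PromiseHookFields::HasIsolatePromiseHook::decode(flags));
  //   DCHECK(!PromiseHookFields::IsDebugActive::decode(flags));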

  bool is_shared() const { return is_shared_; }
  Isolate* shared_isolate() const {
    DCHECK(attached_to_shared_isolate_);
    return shared_isolate_;
  }

  void set_shared_isolate(Isolate* shared_isolate) {
    DCHECK(shared_isolate->is_shared());
    DCHECK_NULL(shared_isolate_);
    DCHECK(!attached_to_shared_isolate_);
    shared_isolate_ = shared_isolate;
  }

  GlobalSafepoint* global_safepoint() const { return global_safepoint_.get(); }

  bool OwnsStringTable() { return !FLAG_shared_string_table || is_shared(); }

#if USE_SIMULATOR
  SimulatorData* simulator_data() { return simulator_data_; }
#endif

#ifdef V8_ENABLE_WEBASSEMBLY
  wasm::StackMemory*& wasm_stacks() { return wasm_stacks_; }
#endif

 private:
  explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
                   bool is_shared);
  ~Isolate();

  bool Init(SnapshotData* startup_snapshot_data,
            SnapshotData* read_only_snapshot_data,
            SnapshotData* shared_heap_snapshot_data, bool can_rehash);

  void CheckIsolateLayout();

  void InitializeCodeRanges();
  void AddCodeMemoryRange(MemoryRange range);

  // Common method to create an Isolate used by Isolate::New() and
  // Isolate::NewShared().
  static Isolate* Allocate(bool is_shared);

  static void RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data);

  void FireCallCompletedCallbackInternal(MicrotaskQueue* microtask_queue);

  class ThreadDataTable {
   public:
    ThreadDataTable() = default;

    PerIsolateThreadData* Lookup(ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads();

   private:
    struct Hasher {
      std::size_t operator()(const ThreadId& t) const {
        return std::hash<int>()(t.ToInteger());
      }
    };

    std::unordered_map<ThreadId, PerIsolateThreadData*, Hasher> table_;
  };

  // These items form a stack, synchronized with threads entering and exiting
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, entry_count is
  // incremented rather than a new item being pushed to the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate, EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) {}
    EntryStackItem(const EntryStackItem&) = delete;
    EntryStackItem& operator=(const EntryStackItem&) = delete;

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;
  };
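
  // Illustrative sketch of the re-entry behavior described above: if the
  // same thread enters the Isolate twice, no second EntryStackItem is
  // pushed; only entry_count is bumped, and matching exits unwind it.
  //
  //   isolate->Enter();  // pushes an EntryStackItem, entry_count == 1
  //   isolate->Enter();  // same thread: entry_count == 2, no new item
  //   isolate->Exit();   // entry_count == 1
  //   isolate->Exit();   // item popped; the Isolate may now be Disposed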

  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;
  static std::atomic<bool> isolate_key_created_;

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagate the pending exception message to the v8::TryCatch.
  // Returns true if there is no external try-catch or if the message was
  // successfully propagated.
  bool PropagatePendingExceptionToExternalTryCatch(
      ExceptionHandlerType top_handler);

  bool HasIsolatePromiseHooks() const {
    return PromiseHookFields::HasIsolatePromiseHook::decode(
        promise_hook_flags_);
  }

  bool HasAsyncEventDelegate() const {
    return PromiseHookFields::HasAsyncEventDelegate::decode(
        promise_hook_flags_);
  }

  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    return "";
  }

  void AddCrashKeysForIsolateAndHeapPointers();

  // Returns the Exception sentinel.
  Object ThrowInternal(Object exception, MessageLocation* location);

  // These methods add/remove the isolate to/from the list of clients in the
  // shared isolate. Isolates in the client list need to participate in a
  // global safepoint.
  void AttachToSharedIsolate();
  void DetachFromSharedIsolate();

  // This class contains a collection of data accessible from both C++ runtime
  // and compiled code (including assembly stubs, builtins, interpreter
  // bytecode handlers and optimized code).
  IsolateData isolate_data_;

  // Set to true if this isolate is used as shared heap. This field must be
  // set before Heap is constructed, as Heap's constructor consults it.
  const bool is_shared_;

  std::unique_ptr<IsolateAllocator> isolate_allocator_;
  Heap heap_;
  ReadOnlyHeap* read_only_heap_ = nullptr;
  std::shared_ptr<ReadOnlyArtifacts> artifacts_;
  std::shared_ptr<StringTable> string_table_;

  const int id_;
  EntryStackItem* entry_stack_ = nullptr;
  int stack_trace_nesting_level_ = 0;
  std::atomic<bool> was_locker_ever_used_{false};
  StringStream* incomplete_message_ = nullptr;
  Address isolate_addresses_[kIsolateAddressCount + 1] = {};
  Bootstrapper* bootstrapper_ = nullptr;
  TieringManager* tiering_manager_ = nullptr;
  CompilationCache* compilation_cache_ = nullptr;
  std::shared_ptr<Counters> async_counters_;
  base::RecursiveMutex break_access_;
  base::SharedMutex feedback_vector_access_;
  base::SharedMutex internalized_string_access_;
  base::SharedMutex full_transition_array_access_;
  base::SharedMutex shared_function_info_access_;
  base::SharedMutex map_updater_access_;
  base::SharedMutex boilerplate_migration_access_;
  Logger* logger_ = nullptr;
  StubCache* load_stub_cache_ = nullptr;
  StubCache* store_stub_cache_ = nullptr;
  Deoptimizer* current_deoptimizer_ = nullptr;
  bool deoptimizer_lazy_throw_ = false;
  MaterializedObjectStore* materialized_object_store_ = nullptr;
  bool capture_stack_trace_for_uncaught_exceptions_ = false;
  int stack_trace_for_uncaught_exceptions_frame_limit_ = 0;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_ =
      StackTrace::kOverview;
  DescriptorLookupCache* descriptor_lookup_cache_ = nullptr;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_ = nullptr;
  UnicodeCache* unicode_cache_ = nullptr;
  AccountingAllocator* allocator_ = nullptr;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_ = nullptr;
  GlobalHandles* global_handles_ = nullptr;
  EternalHandles* eternal_handles_ = nullptr;
  ThreadManager* thread_manager_ = nullptr;
  bigint::Processor* bigint_processor_ = nullptr;
  RuntimeState runtime_state_;
  Builtins builtins_;
  SetupIsolateDelegate* setup_delegate_ = nullptr;
#if defined(DEBUG) || defined(VERIFY_HEAP)
  std::atomic<int> num_active_deserializers_;
#endif
#ifndef V8_INTL_SUPPORT
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
#endif  // !V8_INTL_SUPPORT
  RegExpStack* regexp_stack_ = nullptr;
  std::vector<int> regexp_indices_;
  DateCache* date_cache_ = nullptr;
  base::RandomNumberGenerator* random_number_generator_ = nullptr;
  base::RandomNumberGenerator* fuzzer_rng_ = nullptr;
  std::atomic<RAILMode> rail_mode_;
  v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
  void* atomics_wait_callback_data_ = nullptr;
  PromiseHook promise_hook_ = nullptr;
  HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
      nullptr;
  HostImportModuleDynamicallyWithImportAssertionsCallback
      host_import_module_dynamically_with_import_assertions_callback_ = nullptr;
  std::atomic<debug::CoverageMode> code_coverage_mode_{
      debug::CoverageMode::kBestEffort};

  // Helper function for RunHostImportModuleDynamicallyCallback.
  // Unpacks import assertions, if present, from the second argument to dynamic
  // import() and returns them in a FixedArray, sorted by code point order of
  // the keys, in the form [key1, value1, key2, value2, ...]. Returns an empty
  // MaybeHandle if an error was thrown. In this case, the host callback should
  // not be called and instead the caller should use the pending exception to
  // reject the import() call's Promise.
  MaybeHandle<FixedArray> GetImportAssertionsFromArgument(
      MaybeHandle<Object> maybe_import_assertions_argument);

  HostInitializeImportMetaObjectCallback
      host_initialize_import_meta_object_callback_ = nullptr;
  HostCreateShadowRealmContextCallback
      host_create_shadow_realm_context_callback_ = nullptr;

  base::Mutex rail_mutex_;
  double load_start_time_ms_ = 0;

#ifdef V8_INTL_SUPPORT
  std::string default_locale_;

  // The cache stores the most recently accessed {locales,obj} pair for each
  // cache type.
  struct ICUObjectCacheEntry {
    std::string locales;
    std::shared_ptr<icu::UMemory> obj;

    ICUObjectCacheEntry() = default;
    ICUObjectCacheEntry(std::string locales, std::shared_ptr<icu::UMemory> obj)
        : locales(locales), obj(std::move(obj)) {}
  };

  ICUObjectCacheEntry icu_object_cache_[kICUObjectCacheTypeCount];
#endif  // V8_INTL_SUPPORT

  // True if being profiled. Causes collection of extra compile info.
  std::atomic<bool> is_profiling_{false};

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_ = false;

  // True if a fatal error has been signaled for this isolate.
  bool has_fatal_error_ = false;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_ = false;

  // True if the short builtin calls optimization is enabled.
  bool is_short_builtin_calls_enabled_ = false;

  // True if the isolate is in the background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_ = false;

  // True if the isolate is in memory savings mode. This flag is used to
  // favor memory over runtime performance.
  bool memory_savings_mode_active_ = false;

#ifdef V8_EXTERNAL_CODE_SPACE
  // Base address of the pointer compression cage containing external code
  // space, when external code space is enabled.
  Address code_cage_base_ = 0;
#endif

  // Time stamp at initialization.
  double time_millis_at_init_ = 0;

#ifdef DEBUG
  static std::atomic<size_t> non_disposed_isolates_;

  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_ = nullptr;
  HeapProfiler* heap_profiler_ = nullptr;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;

  const AstStringConstants* ast_string_constants_ = nullptr;

  interpreter::Interpreter* interpreter_ = nullptr;

  compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
  // The following zone is for compiler-related objects that should live
  // through all compilations (and thus all JSHeapBroker instances).
  Zone* compiler_zone_ = nullptr;

  std::unique_ptr<LazyCompileDispatcher> lazy_compile_dispatcher_;
  baseline::BaselineBatchCompiler* baseline_batch_compiler_ = nullptr;
#ifdef V8_ENABLE_MAGLEV
  maglev::MaglevConcurrentDispatcher* maglev_concurrent_dispatcher_ = nullptr;
#endif  // V8_ENABLE_MAGLEV

  using InterruptEntry = std::pair<InterruptCallback, void*>;
  std::queue<InterruptEntry> api_interrupts_queue_;

#define GLOBAL_BACKING_STORE(type, name, initialvalue) type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  bool detailed_source_positions_for_profiling_;

  OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;

  std::unique_ptr<PersistentHandlesList> persistent_handles_list_;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_ = 0;

  bool force_slow_path_ = false;

  bool initialized_ = false;
  bool jitless_ = false;

  int next_optimization_id_ = 0;

#if V8_SFI_HAS_UNIQUE_ID
  std::atomic<int> next_unique_sfi_id_;
#endif

  unsigned next_module_async_evaluating_ordinal_;

  // Vector of callbacks before a Call starts execution.
  std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // Vector of callbacks when a Call completes.
  std::vector<CallCompletedCallback> call_completed_callbacks_;

  v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;

  std::shared_ptr<metrics::Recorder> metrics_recorder_;
  uintptr_t last_recorder_context_id_ = 0;
  std::unordered_map<
      uintptr_t,
      Persistent<v8::Context, v8::CopyablePersistentTraits<v8::Context>>>
      recorder_context_id_map_;

  size_t last_long_task_stats_counter_ = 0;
  v8::metrics::LongTaskStats long_task_stats_;

  std::vector<Object> startup_object_cache_;

  // When sharing data among Isolates (e.g. FLAG_shared_string_table), only the
  // shared Isolate populates this and client Isolates reference that copy.
  //
  // Otherwise this is populated for all Isolates.
  std::vector<Object> shared_heap_object_cache_;

  // Used during builtins compilation to build the builtins constants table,
  // which is stored on the root list prior to serialization.
  BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;

  void InitializeDefaultEmbeddedBlob();
  void CreateAndSetEmbeddedBlob();
  void MaybeRemapEmbeddedBuiltinsIntoCodeRange();
  void TearDownEmbeddedBlob();
  void SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
                       const uint8_t* data, uint32_t data_size);
  void ClearEmbeddedBlob();

  const uint8_t* embedded_blob_code_ = nullptr;
  uint32_t embedded_blob_code_size_ = 0;
  const uint8_t* embedded_blob_data_ = nullptr;
  uint32_t embedded_blob_data_size_ = 0;

  v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;
  std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared_;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_ = nullptr;

  debug::ConsoleDelegate* console_delegate_ = nullptr;

  debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
  uint32_t promise_hook_flags_ = 0;
  int async_task_count_ = 0;

  std::unique_ptr<LocalIsolate> main_thread_local_isolate_;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_ = nullptr;

  bool allow_atomics_wait_ = true;

  base::Mutex managed_ptr_destructors_mutex_;
  ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;

  size_t total_regexp_code_generated_ = 0;

  size_t elements_deletion_counter_ = 0;

  std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;

  EmbeddedFileWriterInterface* embedded_file_writer_ = nullptr;

  // The top entry of the v8::Context::BackupIncumbentScope stack.
  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
      nullptr;

  PrepareStackTraceCallback prepare_stack_trace_callback_ = nullptr;

  // TODO(kenton@cloudflare.com): This mutex can be removed if
  // thread_data_table_ is always accessed under the isolate lock. I do not
  // know if this is the case, so I'm preserving it for now.
  base::Mutex thread_data_table_mutex_;
  ThreadDataTable thread_data_table_;

  // Stores the shared isolate for this client isolate. nullptr for shared
  // isolates or when no shared isolate is used.
  Isolate* shared_isolate_ = nullptr;

#if DEBUG
  // Set to true once during isolate initialization right when attaching to the
  // shared isolate. If there was no shared isolate given it will still be set
  // to true. After this point invocations of shared_isolate() are valid.
  bool attached_to_shared_isolate_ = false;
#endif  // DEBUG

  // Used to track and safepoint all client isolates attached to this shared
  // isolate.
  std::unique_ptr<GlobalSafepoint> global_safepoint_;
  // Client isolates list managed by GlobalSafepoint.
  Isolate* global_safepoint_prev_client_isolate_ = nullptr;
  Isolate* global_safepoint_next_client_isolate_ = nullptr;

  // A signal-safe vector of heap pages containing code. Used with the
  // v8::Unwinder API.
  std::atomic<std::vector<MemoryRange>*> code_pages_{nullptr};
  std::vector<MemoryRange> code_pages_buffer1_;
  std::vector<MemoryRange> code_pages_buffer2_;
  // The mutex only guards adding pages; the retrieval is signal safe.
  base::Mutex code_pages_mutex_;

#ifdef V8_ENABLE_WEBASSEMBLY
  wasm::StackMemory* wasm_stacks_;
#endif

  // Enables the host application to provide a mechanism for recording a
  // predefined set of data as crash keys to be used in postmortem debugging
  // in case of a crash.
  AddCrashKeyCallback add_crash_key_callback_ = nullptr;

  // Delete new/delete operators to ensure that Isolate::New() and
  // Isolate::Delete() are used for Isolate creation and deletion.
  void* operator new(size_t, void* ptr) { return ptr; }

#if USE_SIMULATOR
  SimulatorData* simulator_data_ = nullptr;
#endif

  friend class heap::HeapTester;
  friend class GlobalSafepoint;
  friend class TestSerializer;
};

#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR

// SaveContext scopes save the current context on the Isolate on creation, and
// restore it on destruction.
class V8_EXPORT_PRIVATE SaveContext {
 public:
  explicit SaveContext(Isolate* isolate);

  ~SaveContext();

  Handle<Context> context() { return context_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(CommonFrame* frame);

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  Address c_entry_fp_;
};
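
// Illustrative sketch (other_context is a hypothetical Handle<Context>):
// SaveContext restores the previously entered context when the scope unwinds.
//
//   {
//     SaveContext save(isolate);
//     isolate->set_context(*other_context);  // switch temporarily
//     ...
//   }  // the original context is restored here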

// Like SaveContext, but also switches the Context to a new one in the
// constructor.
class V8_EXPORT_PRIVATE SaveAndSwitchContext : public SaveContext {
 public:
  SaveAndSwitchContext(Isolate* isolate, Context new_context);
};

// A scope which sets the given isolate's context to null for its lifetime to
// ensure that code does not make assumptions on a context being available.
class V8_NODISCARD NullContextScope : public SaveAndSwitchContext {
 public:
  explicit NullContextScope(Isolate* isolate)
      : SaveAndSwitchContext(isolate, Context()) {}
};

class AssertNoContextChange {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() { DCHECK(isolate_->context() == *context_); }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) {}
#endif
};

class ExecutionAccess {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};

// Support for checking for stack-overflows.
class StackLimitCheck {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) {}

  // Use this to check for stack-overflows in C++ code.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }
  static bool HasOverflowed(LocalIsolate* local_isolate);

  // Use this to check for an interrupt request in C++ code.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }

  // Use this to check for stack-overflow when entering runtime from JS code.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};
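
// Illustrative sketch: recursive C++ code can guard against overflow before
// descending further. ProcessNode and node are hypothetical.
//
//   StackLimitCheck check(isolate);
//   if (check.HasOverflowed()) {
//     isolate->StackOverflow();  // schedule the stack-overflow exception
//     return;
//   }
//   ProcessNode(node->child());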

// This macro may be used in a context that disallows JS execution.
// That is why it checks only for a stack overflow and termination.
#define STACK_CHECK(isolate, result_value)                   \
  do {                                                       \
    StackLimitCheck stack_check(isolate);                    \
    if (stack_check.InterruptRequested()) {                  \
      if (stack_check.HasOverflowed()) {                     \
        isolate->StackOverflow();                            \
        return result_value;                                 \
      }                                                      \
      if (isolate->stack_guard()->HasTerminationRequest()) { \
        isolate->TerminateExecution();                       \
        return result_value;                                 \
      }                                                      \
    }                                                        \
  } while (false)
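
// Illustrative sketch: a runtime helper that may recurse deeply would place
// STACK_CHECK at its entry; the surrounding function and its failure value
// are hypothetical.
//
//   MaybeHandle<Object> WalkDeeply(Isolate* isolate, Handle<Object> obj) {
//     STACK_CHECK(isolate, MaybeHandle<Object>());
//     ...
//   }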

class StackTraceFailureMessage {
 public:
  enum StackTraceMode { kIncludeStackTrace, kDontIncludeStackTrace };

  explicit StackTraceFailureMessage(Isolate* isolate, StackTraceMode mode,
                                    void* ptr1 = nullptr, void* ptr2 = nullptr,
                                    void* ptr3 = nullptr, void* ptr4 = nullptr,
                                    void* ptr5 = nullptr, void* ptr6 = nullptr);

  V8_NOINLINE void Print() volatile;

  static const uintptr_t kStartMarker = 0xdecade30;
  static const uintptr_t kEndMarker = 0xdecade31;
  static const int kStacktraceBufferSize = 32 * KB;

  uintptr_t start_marker_ = kStartMarker;
  void* isolate_;
  void* ptr1_;
  void* ptr2_;
  void* ptr3_;
  void* ptr4_;
  void* ptr5_;
  void* ptr6_;
  void* code_objects_[4];
  char js_stack_trace_[kStacktraceBufferSize];
  uintptr_t end_marker_ = kEndMarker;
};

template <base::MutexSharedType kIsShared>
class V8_NODISCARD SharedMutexGuardIfOffThread<Isolate, kIsShared> final {
 public:
  SharedMutexGuardIfOffThread(base::SharedMutex* mutex, Isolate* isolate) {
    DCHECK_NOT_NULL(mutex);
    DCHECK_NOT_NULL(isolate);
    DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
  }

  SharedMutexGuardIfOffThread(const SharedMutexGuardIfOffThread&) = delete;
  SharedMutexGuardIfOffThread& operator=(const SharedMutexGuardIfOffThread&) =
      delete;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_EXECUTION_ISOLATE_H_