// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_EXECUTION_ISOLATE_H_
#define V8_EXECUTION_ISOLATE_H_

#include <atomic>
#include <cstddef>
#include <functional>
#include <memory>
#include <queue>
#include <unordered_map>
#include <vector>

#include "include/v8-inspector.h"
#include "include/v8-internal.h"
#include "include/v8-metrics.h"
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/builtins/builtins.h"
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
#include "src/execution/execution.h"
#include "src/execution/external-pointer-table.h"
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
#include "src/execution/stack-guard.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/init/isolate-allocator.h"
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/strings/unicode.h"
#include "src/utils/allocation.h"

#ifdef V8_INTL_SUPPORT
#include "unicode/uversion.h"  // Define U_ICU_NAMESPACE.
namespace U_ICU_NAMESPACE {
class UMemory;
}  // namespace U_ICU_NAMESPACE
#endif  // V8_INTL_SUPPORT

namespace v8 {

namespace base {
class RandomNumberGenerator;
}  // namespace base

namespace debug {
class ConsoleDelegate;
class AsyncEventDelegate;
}  // namespace debug

namespace internal {

namespace heap {
class HeapTester;
}  // namespace heap

class AddressToIndexHashMap;
class AstStringConstants;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
class CodeEventDispatcher;
class CodeTracer;
class CommonFrame;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class Counters;
class Debug;
class Deoptimizer;
class DescriptorLookupCache;
class EmbeddedFileWriterInterface;
class EternalHandles;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InnerPointerToCodeCache;
class LocalIsolate;
class Logger;
class MaterializedObjectStore;
class Microtask;
class MicrotaskQueue;
class OptimizingCompileDispatcher;
class PersistentHandles;
class PersistentHandlesList;
class ReadOnlyArtifacts;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
class SetupIsolateDelegate;
class Simulator;
class SnapshotData;
class StringTable;
class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class TracingCpuProfilerImpl;
class UnicodeCache;
struct ManagedPtrDestructor;

template <StateTag Tag>
class VMState;

namespace interpreter {
class Interpreter;
}  // namespace interpreter

namespace compiler {
class PerIsolateCompilerCache;
}  // namespace compiler

namespace wasm {
class WasmEngine;
}  // namespace wasm

namespace win64_unwindinfo {
class BuiltinUnwindInfo;
}  // namespace win64_unwindinfo

namespace metrics {
class Recorder;
}  // namespace metrics

#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    DCHECK(!__isolate__->has_pending_exception());     \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)
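
// A minimal usage sketch (Runtime_Example is hypothetical): in an
// Object-returning runtime function, promote any scheduled exception and
// bail out before resuming JavaScript execution.
//
// RUNTIME_FUNCTION(Runtime_Example) {
//   HandleScope scope(isolate);
//   RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
//   ...
// }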

// Macros for MaybeHandle.

#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    DCHECK(!__isolate__->has_pending_exception());          \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
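
// A usage sketch (Func and DoWork are hypothetical): in a function returning
// MaybeHandle<Object>, promote a scheduled exception and return the empty
// handle.
//
// MaybeHandle<Object> Func(Isolate* isolate) {
//   DoWork(isolate);
//   RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
//   ...
// }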

#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    if (!(call).ToLocal(&dst)) {                                              \
      DCHECK(__isolate__->has_scheduled_exception());                         \
      __isolate__->PromoteScheduledException();                               \
      return value;                                                           \
    }                                                                         \
  } while (false)

#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                            \
    Isolate* __isolate__ = (isolate);                             \
    if ((call).IsNothing()) {                                     \
      DCHECK(__isolate__->has_scheduled_exception());             \
      __isolate__->PromoteScheduledException();                   \
      return value;                                               \
    }                                                             \
  } while (false)
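
// A usage sketch (hypothetical names): propagate a scheduled exception from a
// Maybe-returning call inside a function returning Maybe<bool>.
//
// Maybe<bool> Func(Isolate* isolate, Handle<JSObject> obj) {
//   RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
//       isolate, FunctionReturningMaybe(isolate, obj), Nothing<bool>());
//   return Just(true);
// }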

/**
 * RETURN_RESULT_OR_FAILURE is used in functions with return type Object (such
 * as "RUNTIME_FUNCTION(...) {...}" or "BUILTIN(...) {...}") to return either
 * the contents of a MaybeHandle<X>, or the "exception" sentinel value.
 * Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_RESULT_OR_FAILURE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Handle<X> or Maybe<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_RESULT_OR_FAILURE(isolate, call)      \
  do {                                               \
    Handle<Object> __result__;                       \
    Isolate* __isolate__ = (isolate);                \
    if (!(call).ToHandle(&__result__)) {             \
      DCHECK(__isolate__->has_pending_exception());  \
      return ReadOnlyRoots(__isolate__).exception(); \
    }                                                \
    DCHECK(!__isolate__->has_pending_exception());   \
    return *__result__;                              \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                              \
    if (!(call).ToHandle(&dst)) {                                   \
      DCHECK((isolate)->has_pending_exception());                   \
      return value;                                                 \
    }                                                               \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)                \
  do {                                                                        \
    auto* __isolate__ = (isolate);                                            \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,                  \
                                     ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
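
// A usage sketch (names hypothetical): assign the result of a
// MaybeHandle-returning call, or propagate the pending exception as an empty
// MaybeHandle<String>.
//
// MaybeHandle<String> Func(Isolate* isolate, Handle<Object> input) {
//   Handle<String> str;
//   ASSIGN_RETURN_ON_EXCEPTION(isolate, str,
//                              Object::ToString(isolate, input), String);
//   ...
// }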

#define THROW_NEW_ERROR(isolate, call, T)                                \
  do {                                                                   \
    auto* __isolate__ = (isolate);                                       \
    return __isolate__->template Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    auto* __isolate__ = (isolate);                            \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
  do {                                                     \
    auto* __isolate__ = (isolate);                         \
    __isolate__->Throw(*__isolate__->factory()->call);     \
    return value;                                          \
  } while (false)
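
// A usage sketch: the "call" argument names a Factory method that allocates
// the error object (the message template here is illustrative).
//
// MaybeHandle<JSFunction> Func(Isolate* isolate) {
//   if (failed) {
//     THROW_NEW_ERROR(isolate,
//                     NewTypeError(MessageTemplate::kNotConstructor),
//                     JSFunction);
//   }
//   ...
// }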

/**
 * RETURN_ON_EXCEPTION_VALUE conditionally returns the given value when the
 * given MaybeHandle is empty. It is typically used in functions with return
 * type Maybe<X> or Handle<X>. Example usage:
 *
 * Handle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Handle<X>());
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * Maybe<bool> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Nothing<bool>());
 *   // code to handle the non-exception case
 *   return Just(true);
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Object, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 */
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                  \
    if ((call).is_null()) {                             \
      DCHECK((isolate)->has_pending_exception());       \
      return value;                                     \
    }                                                   \
  } while (false)

/**
 * RETURN_FAILURE_ON_EXCEPTION conditionally returns the "exception" sentinel if
 * the given MaybeHandle is empty; so it can only be used in functions with
 * return type Object, such as RUNTIME_FUNCTION(...) {...} or BUILTIN(...)
 * {...}. Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_FAILURE_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Maybe<X> or Handle<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)                     \
  do {                                                                 \
    Isolate* __isolate__ = (isolate);                                  \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                       \
                              ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

/**
 * RETURN_ON_EXCEPTION conditionally returns an empty MaybeHandle<T> if the
 * given MaybeHandle is empty. Use it to return immediately from a function with
 * return type MaybeHandle when an exception was thrown. Example usage:
 *
 * MaybeHandle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleY(...),
 *       X);
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * If inside a function with return type Object, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 * If inside a function with return type Maybe<X> or Handle<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_ON_EXCEPTION(isolate, call, T) \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())

#define RETURN_FAILURE(isolate, should_throw, call) \
  do {                                              \
    if ((should_throw) == kDontThrow) {             \
      return Just(false);                           \
    } else {                                        \
      isolate->Throw(*isolate->factory()->call);    \
      return Nothing<bool>();                       \
    }                                               \
  } while (false)
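
// A usage sketch (names hypothetical): used in Maybe<bool>-returning object
// operations that take a ShouldThrow mode.
//
// Maybe<bool> SetPropertySketch(Isolate* isolate, ShouldThrow should_throw) {
//   if (read_only) {
//     RETURN_FAILURE(isolate, should_throw,
//                    NewTypeError(MessageTemplate::kStrictReadOnlyProperty));
//   }
//   return Just(true);
// }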

#define MAYBE_RETURN(call, value)         \
  do {                                    \
    if ((call).IsNothing()) return value; \
  } while (false)

#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())

#define MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
  do {                                                               \
    Isolate* __isolate__ = (isolate);                                \
    if (!(call).To(&dst)) {                                          \
      DCHECK(__isolate__->has_pending_exception());                  \
      return ReadOnlyRoots(__isolate__).exception();                 \
    }                                                                \
  } while (false)
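
// A usage sketch (lhs/rhs hypothetical): unwrap a Maybe<bool> result in an
// Object-returning builtin, or return the exception sentinel.
//
// BUILTIN(ExampleBuiltin) {
//   HandleScope scope(isolate);
//   bool equal;
//   MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
//       isolate, equal, Object::Equals(isolate, lhs, rhs));
//   return *isolate->factory()->ToBoolean(equal);
// }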

#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)
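
// A usage sketch: iterate a large range while opening a fresh HandleScope
// every 1024 iterations so handles don't accumulate (the body shown is
// illustrative).
//
// FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i++, {
//   Handle<Object> element = ...;
// });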

#define WHILE_WITH_HANDLE_SCOPE(isolate, limit_check, body)                  \
  do {                                                                       \
    Isolate* for_with_handle_isolate = isolate;                              \
    while (limit_check) {                                                    \
      HandleScope loop_scope(for_with_handle_isolate);                       \
      for (int for_with_handle_it = 0;                                       \
           limit_check && for_with_handle_it < 1024; ++for_with_handle_it) { \
        body                                                                 \
      }                                                                      \
    }                                                                        \
  } while (false)
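
// A usage sketch, same idea in while-loop shape (ProcessNext and work_list
// are hypothetical):
//
// WHILE_WITH_HANDLE_SCOPE(isolate, !work_list.empty(), {
//   ProcessNext(isolate, &work_list);
// });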

#define FIELD_ACCESSOR(type, name)                \
  inline void set_##name(type v) { name##_ = v; } \
  inline type name() const { return name##_; }
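
// For reference, FIELD_ACCESSOR(uintptr_t, stack_limit) expands to:
//   inline void set_stack_limit(uintptr_t v) { stack_limit_ = v; }
//   inline uintptr_t stack_limit() const { return stack_limit_; }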

// Controls for manual embedded blob lifecycle management, used by tests and
// mksnapshot.
V8_EXPORT_PRIVATE void DisableEmbeddedBlobRefcounting();
V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();

#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, kCodeKindCount)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

using DebugObjectCache = std::vector<Handle<HeapObject>>;

#define ISOLATE_INIT_LIST(V)                                                   \
  /* Assembler state. */                                                       \
  V(FatalErrorCallback, exception_behavior, nullptr)                           \
  V(OOMErrorCallback, oom_behavior, nullptr)                                   \
  V(LogEventCallback, event_logger, nullptr)                                   \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr)  \
  V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback,         \
    nullptr)                                                                   \
  V(ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback2,       \
    nullptr)                                                                   \
  V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr)    \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                     \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                   \
  V(WasmStreamingCallback, wasm_streaming_callback, nullptr)                   \
  V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr)        \
  V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr)         \
  V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr)              \
  /* State for Relocatable. */                                                 \
  V(Relocatable*, relocatable_top, nullptr)                                    \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)              \
  V(Object, string_stream_current_security_token, Object())                    \
  V(const intptr_t*, api_external_references, nullptr)                         \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                   \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                        \
  V(MicrotaskQueue*, default_microtask_queue, nullptr)                         \
  V(CompilationStatistics*, turbo_statistics, nullptr)                         \
  V(CodeTracer*, code_tracer, nullptr)                                         \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                            \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                   \
  V(const v8::StartupData*, snapshot_blob, nullptr)                            \
  V(int, code_and_metadata_size, 0)                                            \
  V(int, bytecode_and_metadata_size, 0)                                        \
  V(int, external_script_source_size, 0)                                       \
  /* Number of CPU profilers running on the isolate. */                        \
  V(size_t, num_cpu_profilers, 0)                                              \
  /* true if a trace is being formatted through Error.prepareStackTrace. */    \
  V(bool, formatting_stack_trace, false)                                       \
  /* Perform side effect checks on function call and API callbacks. */         \
  V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints)   \
  /* Current code coverage mode */                                             \
  V(debug::CoverageMode, code_coverage_mode, debug::CoverageMode::kBestEffort) \
  V(debug::TypeProfileMode, type_profile_mode, debug::TypeProfileMode::kNone)  \
  V(int, last_console_context_id, 0)                                           \
  V(v8_inspector::V8Inspector*, inspector, nullptr)                            \
  V(bool, next_v8_call_is_safe_for_termination, false)                         \
  V(bool, only_terminate_in_safe_scope, false)                                 \
  V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info)    \
  V(int, embedder_wrapper_type_index, -1)                                      \
  V(int, embedder_wrapper_object_index, -1)

#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                         \
  inline void set_##name(type v) { thread_local_top()->name##_ = v; } \
  inline type name() const { return thread_local_top()->name##_; }

#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top()->name##_; }
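
// For reference, THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) expands to:
//   inline void set_thread_id(ThreadId v) {
//     thread_local_top()->thread_id_ = v;
//   }
//   inline ThreadId thread_id() const {
//     return thread_local_top()->thread_id_;
//   }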

// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};

class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;

 public:
  using HandleScopeType = HandleScope;
  void* operator new(size_t) = delete;
  void operator delete(void*) = delete;

  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(nullptr)
#if USE_SIMULATOR
          ,
          simulator_(nullptr)
#endif
    {
    }
    ~PerIsolateThreadData();
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_ == thread_id;
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };

  static void InitializeOncePerProcess();

  // Creates an Isolate object. Must be used instead of constructing an
  // Isolate with the new operator.
  static Isolate* New();

  // Deletes the Isolate object. Must be used instead of the delete operator.
  // Destroys the non-default isolates.
  // Sets the default isolate into a "has_been_disposed" state rather than
  // destroying it, for legacy API reasons.
  static void Delete(Isolate* isolate);

  void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts,
                                  ReadOnlyHeap* ro_heap);
  void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }

  // Page allocator that must be used for allocating V8 heap pages.
  v8::PageAllocator* page_allocator();

  // Returns the PerIsolateThreadData for the current thread (or nullptr if one
  // is not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running or nullptr.
  V8_INLINE static Isolate* TryGetCurrent() {
    DCHECK_EQ(true, isolate_key_created_.load(std::memory_order_relaxed));
    return reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
  }

  // Returns the isolate inside which the current thread is running.
  V8_INLINE static Isolate* Current() {
    Isolate* isolate = TryGetCurrent();
    DCHECK_NOT_NULL(isolate);
    return isolate;
  }

  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();
  bool InitializeCounters();  // Returns false if already initialized.

  bool InitWithoutSnapshot();
  bool InitWithSnapshot(SnapshotData* startup_snapshot_data,
                        SnapshotData* read_only_snapshot_data, bool can_rehash);

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != nullptr; }

  void ReleaseSharedPtrs();

  void ClearSerializerData();

  bool LogObjectRelocation();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Exit();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

  // Find the PerThread for the given (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);

  // Discard the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, this is a no-op.
  void DiscardPerThreadDataForThisThread();

  // Mutex for serializing access to break control structures.
  base::RecursiveMutex* break_access() { return &break_access_; }

  // Shared mutex for allowing concurrent read/writes to FeedbackVectors.
  base::SharedMutex* feedback_vector_access() {
    return &feedback_vector_access_;
  }

  // Shared mutex for allowing concurrent read/writes to Strings.
  base::SharedMutex* string_access() { return &string_access_; }

  // Shared mutex for allowing concurrent read/writes to TransitionArrays.
  base::SharedMutex* transition_array_access() {
    return &transition_array_access_;
  }

  // The isolate's string table.
  StringTable* string_table() { return string_table_.get(); }

  Address get_address_from_id(IsolateAddressId id);

  // Access to top context (where the current function object was created).
  Context context() { return thread_local_top()->context_; }
  inline void set_context(Context context);
  Context* context_address() { return &thread_local_top()->context_; }

  // Access to current thread id.
  THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)

  // Interface to pending exception.
  inline Object pending_exception();
  inline void set_pending_exception(Object exception_obj);
  inline void clear_pending_exception();

  bool AreWasmThreadsEnabled(Handle<Context> context);
  bool IsWasmSimdEnabled(Handle<Context> context);

  THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)

  inline bool has_pending_exception();

  THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)

  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)

  v8::TryCatch* try_catch_handler() {
    return thread_local_top()->try_catch_handler_;
  }
  bool* external_caught_exception_address() {
    return &thread_local_top()->external_caught_exception_;
  }

  THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)

  inline void clear_pending_message();
  Address pending_message_obj_address() {
    return reinterpret_cast<Address>(&thread_local_top()->pending_message_obj_);
  }

  inline Object scheduled_exception();
  inline bool has_scheduled_exception();
  inline void clear_scheduled_exception();

  bool IsJavaScriptHandlerOnTop(Object exception);
  bool IsExternalHandlerOnTop(Object exception);

  inline bool is_catchable_by_javascript(Object exception);
  inline bool is_catchable_by_wasm(Object exception);

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
  Address c_function() { return thread_local_top()->c_function_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top()->c_entry_fp_;
  }
  static uint32_t c_entry_fp_offset() {
    return static_cast<uint32_t>(
        OFFSET_OF(Isolate, thread_local_top()->c_entry_fp_) -
        isolate_root_bias());
  }
  inline Address* handler_address() { return &thread_local_top()->handler_; }
  inline Address* c_function_address() {
    return &thread_local_top()->c_function_;
  }

#if defined(DEBUG) || defined(VERIFY_HEAP)
  // Count the number of active deserializers, so that the heap verifier knows
  // whether there is currently an active deserialization happening.
  //
  // This is needed as the verifier currently doesn't support verifying objects
  // which are partially deserialized.
  //
  // TODO(leszeks): Make the verifier a bit more deserialization compatible.
  void RegisterDeserializerStarted() { ++num_active_deserializers_; }
  void RegisterDeserializerFinished() {
    CHECK_GE(--num_active_deserializers_, 0);
  }
  bool has_active_deserializer() const {
    return num_active_deserializers_.load(std::memory_order_acquire) > 0;
  }
#else
  void RegisterDeserializerStarted() {}
  void RegisterDeserializerFinished() {}
  bool has_active_deserializer() const { UNREACHABLE(); }
#endif

  // Bottom JS entry.
  Address js_entry_sp() { return thread_local_top()->js_entry_sp_; }
  inline Address* js_entry_sp_address() {
    return &thread_local_top()->js_entry_sp_;
  }

  std::vector<MemoryRange>* GetCodePages() const;

  void SetCodePages(std::vector<MemoryRange>* new_code_pages);

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  inline Handle<JSGlobalObject> global_object();

  // Returns the global proxy object of the current context.
  inline Handle<JSGlobalProxy> global_proxy();

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top()->Free(); }

  // This method is called by the API after operations that may throw
  // exceptions.  If an exception was thrown and not handled by an external
  // handler, the exception is scheduled to be rethrown when we return to
  // running JavaScript code.  If an exception is scheduled, true is returned.
  bool OptionalRescheduleException(bool clear_exception);

  // Push and pop a promise and the current try-catch handler.
  void PushPromise(Handle<JSObject> promise);
  void PopPromise();

  // Return the relevant Promise that a throw/rejection pertains to, based
  // on the contents of the Promise stack.
  Handle<Object> GetPromiseOnStackOnThrow();

  // Heuristically guess whether a Promise is handled by a user catch handler.
  bool PromiseHasUserDefinedRejectHandler(Handle<JSPromise> promise);

  class ExceptionScope {
   public:
    // Scope currently can only be used for regular exceptions,
    // not termination exception.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };

  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture, int frame_limit, StackTrace::StackTraceOptions options);
  bool get_capture_stack_trace_for_uncaught_exceptions() const;

  void SetAbortOnUncaughtExceptionCallback(
      v8::Isolate::AbortOnUncaughtExceptionCallback callback);

  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
  void PrintCurrentStackTrace(FILE* out);
  void PrintStack(StringStream* accumulator,
                  PrintStackMode mode = kPrintStackVerbose);
  void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
  Handle<String> StackTraceString();
  // Stores a stack trace in a stack-allocated temporary buffer which will
  // end up in the minidump for debugging purposes.
  V8_NOINLINE void PushStackTraceAndDie(void* ptr1 = nullptr,
                                        void* ptr2 = nullptr,
                                        void* ptr3 = nullptr,
                                        void* ptr4 = nullptr);
  Handle<FixedArray> CaptureCurrentStackTrace(
      int frame_limit, StackTrace::StackTraceOptions options);
  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                         FrameSkipMode mode,
                                         Handle<Object> caller);
  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
      Handle<JSReceiver> error_object);
  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
      Handle<JSReceiver> error_object, FrameSkipMode mode,
      Handle<Object> caller);
  Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);

  Address GetAbstractPC(int* line, int* column);

  // Returns whether the given context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(Handle<JSObject> receiver);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Object Throw(Object exception) { return ThrowInternal(exception, nullptr); }
  Object ThrowAt(Handle<JSObject> exception, MessageLocation* location);
  Object ThrowIllegalOperation();

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(Handle<Object> exception) {
    Throw(*exception);
    return MaybeHandle<T>();
  }

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> ThrowAt(Handle<JSObject> exception,
                                               MessageLocation* location) {
    ThrowAt(exception, location);
    return MaybeHandle<T>();
  }

  void FatalProcessOutOfHeapMemory(const char* location) {
    heap()->FatalProcessOutOfMemory(location);
  }

  void set_console_delegate(debug::ConsoleDelegate* delegate) {
    console_delegate_ = delegate;
  }
  debug::ConsoleDelegate* console_delegate() { return console_delegate_; }

  void set_async_event_delegate(debug::AsyncEventDelegate* delegate) {
    async_event_delegate_ = delegate;
    PromiseHookStateUpdated();
  }
  void OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
                                   debug::DebugAsyncActionType);

  // Re-throw an exception.  This involves no error reporting since error
  // reporting was handled when the exception was thrown originally.
  Object ReThrow(Object exception);

  // Find the correct handler for the current pending exception. This also
  // clears and returns the current pending exception.
  Object UnwindAndFindHandler();

  // Tries to predict whether an exception will be caught. Note that this can
  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_DESUGARING,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  CatchType PredictExceptionCatcher();

  void ScheduleThrow(Object exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                     Handle<Object> exception);

  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);
  Handle<JSMessageObject> CreateMessageOrAbort(Handle<Object> exception,
                                               MessageLocation* location);

  // Out of resource exception helpers.
  Object StackOverflow();
  Object TerminateExecution();
  void CancelTerminateExecution();

  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration
  void Iterate(RootVisitor* v);
  void Iterate(RootVisitor* v, ThreadLocalTop* t);
  char* Iterate(RootVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<NativeContext> native_context();
  inline NativeContext raw_native_context();

  Handle<Context> GetIncumbentContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                \
  inline type name() const {                                     \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return name##_;                                              \
  }                                                              \
  inline void set_##name(type value) {                           \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    name##_ = value;                                             \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                \
  inline type* name() {                                          \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return &(name##_)[0];                                        \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  // Use for updating counters on a foreground thread.
  Counters* counters() { return async_counters().get(); }
  // Use for updating counters on a background thread.
  const std::shared_ptr<Counters>& async_counters() {
    // Make sure InitializeCounters() has been called.
    DCHECK_NOT_NULL(async_counters_.get());
    return async_counters_;
  }
  const std::shared_ptr<metrics::Recorder>& metrics_recorder() {
    return metrics_recorder_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK_NOT_NULL(logger_);
    return logger_;
  }
  StackGuard* stack_guard() { return isolate_data()->stack_guard(); }
  Heap* heap() { return &heap_; }
  ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
  static Isolate* FromHeap(Heap* heap) {
    return reinterpret_cast<Isolate*>(reinterpret_cast<Address>(heap) -
                                      OFFSET_OF(Isolate, heap_));
  }

  const IsolateData* isolate_data() const { return &isolate_data_; }
  IsolateData* isolate_data() { return &isolate_data_; }

  // Generated code can embed this address to get access to the isolate-specific
  // data (for example, roots, external references, builtins, etc.).
  // The kRootRegister is set to this value.
  Address isolate_root() const { return isolate_data()->isolate_root(); }
  static size_t isolate_root_bias() {
    return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
  }
  static Isolate* FromRootAddress(Address isolate_root) {
    return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
  }

  RootsTable& roots_table() { return isolate_data()->roots(); }
  const RootsTable& roots_table() const { return isolate_data()->roots(); }

  // A sub-region of the Isolate object that has "predictable" layout which
  // depends only on the pointer size and therefore it's guaranteed that there
  // will be no compatibility issues because of different compilers used for
  // snapshot generator and actual V8 code.
  // Thus, kRootRegister may be used to address any location that falls into
  // this region.
  // See IsolateData::AssertPredictableLayout() for details.
  base::AddressRegion root_register_addressable_region() const {
    return base::AddressRegion(reinterpret_cast<Address>(&isolate_data_),
                               sizeof(IsolateData));
  }

  Object root(RootIndex index) { return Object(roots_table()[index]); }

  Handle<Object> root_handle(RootIndex index) {
    return Handle<Object>(&roots_table()[index]);
  }

  ExternalReferenceTable* external_reference_table() {
    DCHECK(isolate_data()->external_reference_table()->is_initialized());
    return isolate_data()->external_reference_table();
  }

  Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
  V8_INLINE Address* builtins_table() { return isolate_data_.builtins(); }

  bool IsBuiltinsTableHandleLocation(Address* handle_location);

  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  Deoptimizer* GetAndClearCurrentDeoptimizer() {
    Deoptimizer* result = current_deoptimizer_;
    CHECK_NOT_NULL(result);
    current_deoptimizer_ = nullptr;
    return result;
  }
  void set_current_deoptimizer(Deoptimizer* deoptimizer) {
    DCHECK_NULL(current_deoptimizer_);
    DCHECK_NOT_NULL(deoptimizer);
    current_deoptimizer_ = deoptimizer;
  }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  void InitializeThreadLocal();
  ThreadLocalTop* thread_local_top() {
    return &isolate_data_.thread_local_top_;
  }
  ThreadLocalTop const* thread_local_top() const {
    return &isolate_data_.thread_local_top_;
  }

  static uint32_t thread_in_wasm_flag_address_offset() {
    // For WebAssembly trap handlers there is a flag in thread-local storage
    // which indicates that the executing thread executes WebAssembly code. To
    // access this flag directly from generated code, we store a pointer to the
    // flag in ThreadLocalTop in thread_in_wasm_flag_address_. This function
    // here returns the offset of that member from {isolate_root()}.
    return static_cast<uint32_t>(
        OFFSET_OF(Isolate, thread_local_top()->thread_in_wasm_flag_address_) -
        isolate_root_bias());
  }

  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() { return unicode_cache_; }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

#ifndef V8_INTL_SUPPORT
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
  regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }
#endif  // !V8_INTL_SUPPORT

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code);

  std::vector<int>* regexp_indices() { return &regexp_indices_; }

  Debug* debug() { return debug_; }

  void* is_profiling_address() { return &is_profiling_; }

  bool is_profiling() const {
    return is_profiling_.load(std::memory_order_relaxed);
  }

  void set_is_profiling(bool enabled) {
    is_profiling_.store(enabled, std::memory_order_relaxed);
  }

  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  static size_t non_disposed_isolates() { return non_disposed_isolates_; }
#endif

  v8::internal::Factory* factory() {
    // Upcast to the privately inherited base-class using c-style casts to avoid
    // undefined behavior (as static_cast cannot cast across private bases).
    // NOLINTNEXTLINE (google-readability-casting)
    return (v8::internal::Factory*)this;  // NOLINT(readability/casting)
  }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  void SetData(uint32_t slot, void* data) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    isolate_data_.embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    return isolate_data_.embedder_data_[slot];
  }

  bool serializer_enabled() const { return serializer_enabled_; }

  void enable_serializer() { serializer_enabled_ = true; }

  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool NeedsDetailedOptimizedCodeLineInfo() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_binary_code_coverage() const {
    return is_precise_binary_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_count_code_coverage() const {
    return is_precise_count_code_coverage() || is_block_count_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfileMode::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list when both code coverage and type profile are no longer
  // needed. This keeps many feedback vectors alive, but code coverage and
  // type profile are used for debugging only, so the increase in memory
  // usage is expected.
  void SetFeedbackVectorsForProfilingTools(Object value);
1234 
1235   void MaybeInitializeVectorListFromHeap();
1236 
time_millis_since_init()1237   double time_millis_since_init() {
1238     return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
1239   }
1240 
date_cache()1241   DateCache* date_cache() { return date_cache_; }
1242 
1243   void set_date_cache(DateCache* date_cache);
1244 
#ifdef V8_INTL_SUPPORT

  const std::string& default_locale() { return default_locale_; }

  void ResetDefaultLocale() { default_locale_.clear(); }

  void set_default_locale(const std::string& locale) {
    DCHECK_EQ(default_locale_.length(), 0);
    default_locale_ = locale;
  }

  // Enum used to access the ICU object cache.
  enum class ICUObjectCacheType {
    kDefaultCollator,
    kDefaultNumberFormat,
    kDefaultSimpleDateFormat,
    kDefaultSimpleDateFormatForTime,
    kDefaultSimpleDateFormatForDate
  };

  icu::UMemory* get_cached_icu_object(ICUObjectCacheType cache_type);
  void set_icu_object_in_cache(ICUObjectCacheType cache_type,
                               std::shared_ptr<icu::UMemory> obj);
  void clear_cached_icu_object(ICUObjectCacheType cache_type);
  void ClearCachedIcuObjects();

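  // Illustrative sketch (hypothetical call site; real ones live in the Intl
  // objects' implementation): callers consult the cache before building a
  // fresh ICU object, and populate it on a miss:
  //
  //   icu::UMemory* cached = isolate->get_cached_icu_object(
  //       Isolate::ICUObjectCacheType::kDefaultCollator);
  //   if (cached == nullptr) {
  //     isolate->set_icu_object_in_cache(
  //         Isolate::ICUObjectCacheType::kDefaultCollator,
  //         std::move(new_collator));  // a std::shared_ptr<icu::UMemory>
  //   }
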
#endif  // V8_INTL_SUPPORT

  enum class KnownPrototype { kNone, kObject, kArray, kString };

  KnownPrototype IsArrayOrObjectOrStringPrototype(Object object);

  // On intent to set an element in an object, make sure that appropriate
  // notifications occur if the set is on the elements of the Array or
  // Object prototype. Also ensure that changes to the prototype chain
  // between Array and Object fire notifications.
  void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
  void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
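  // For example, a script that runs `Array.prototype[0] = x` stores an
  // element on the array prototype, so the no-elements protector has to be
  // invalidated and any code specialized on it deoptimized.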

  // Returns true if array is the initial array prototype in any native context.
  bool IsAnyInitialArrayPrototype(Handle<JSArray> array);

  std::unique_ptr<PersistentHandles> NewPersistentHandles();

  PersistentHandlesList* persistent_handles_list() {
    return persistent_handles_list_.get();
  }

#ifdef DEBUG
  bool IsDeferredHandle(Address* location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == nullptr ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != nullptr;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    return optimizing_compile_dispatcher_;
  }
  // Flushes all pending concurrent optimization jobs from the optimizing
  // compile dispatcher's queue.
  void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);

  int id() const { return id_; }

  CompilationStatistics* GetTurboStatistics();
  CodeTracer* GetCodeTracer();

  void DumpAndResetStats();

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  void set_force_slow_path(bool v) { force_slow_path_ = v; }
  bool force_slow_path() const { return force_slow_path_; }
  bool* force_slow_path_address() { return &force_slow_path_; }

  DebugInfo::ExecutionMode* debug_execution_mode_address() {
    return &debug_execution_mode_;
  }

  base::RandomNumberGenerator* random_number_generator();

  base::RandomNumberGenerator* fuzzer_rng();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
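  // For example, GenerateIdentityHash(0x3FFFFFFF) returns a value whose low
  // 30 bits are not all zero, so the masked result can be used directly as a
  // non-zero, Smi-sized identity hash.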
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code FindCodeObject(Address a);

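  // Hands out a fresh optimization id and post-increments the counter,
  // wrapping back to zero once the next id would no longer fit in a Smi.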
  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);
  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback(MicrotaskQueue* microtask_queue);

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void SetTerminationOnExternalTryCatch();

  Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
                           bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  static std::string GetTurboCfgFileName(Isolate* isolate);

  int GetNextScriptId();

#if V8_SFI_HAS_UNIQUE_ID
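  // Returns the current unique SharedFunctionInfo id and atomically advances
  // the counter, wrapping to zero at Smi::kMaxValue so ids stay Smi-sized.
  // compare_exchange_weak may fail spuriously, hence the retry loop.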
  int GetNextUniqueSharedFunctionInfoId() {
    int current_id = next_unique_sfi_id_.load(std::memory_order_relaxed);
    int next_id;
    do {
      if (current_id >= Smi::kMaxValue) {
        next_id = 0;
      } else {
        next_id = current_id + 1;
      }
    } while (!next_unique_sfi_id_.compare_exchange_weak(
        current_id, next_id, std::memory_order_relaxed));
    return current_id;
  }
#endif

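  // The raw field addresses below are, as far as this sketch assumes, handed
  // to generated code (builtins and optimized code) so it can test the
  // underlying flags directly instead of calling into C++.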
  Address promise_hook_address() {
    return reinterpret_cast<Address>(&promise_hook_);
  }

  Address async_event_delegate_address() {
    return reinterpret_cast<Address>(&async_event_delegate_);
  }

  Address promise_hook_or_async_event_delegate_address() {
    return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
  }

  Address promise_hook_or_debug_is_active_or_async_event_delegate_address() {
    return reinterpret_cast<Address>(
        &promise_hook_or_debug_is_active_or_async_event_delegate_);
  }

  Address handle_scope_implementer_address() {
    return reinterpret_cast<Address>(&handle_scope_implementer_);
  }

  void SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
                              void* data);
  void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
                              Handle<JSArrayBuffer> array_buffer,
                              size_t offset_in_bytes, int64_t value,
                              double timeout_in_ms,
                              AtomicsWaitWakeHandle* stop_handle);

  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);
  void PromiseHookStateUpdated();

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);

  std::vector<Object>* startup_object_cache() { return &startup_object_cache_; }

  bool IsGeneratingEmbeddedBuiltins() const {
    return builtins_constants_table_builder() != nullptr;
  }

  BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
    return builtins_constants_table_builder_;
  }

  // Hashes bits of the Isolate that are relevant for embedded builtins. In
  // particular, the embedded blob requires builtin Code object layout and the
  // builtins constants table to remain unchanged from build-time.
  size_t HashIsolateForEmbeddedBlob();

  static const uint8_t* CurrentEmbeddedBlobCode();
  static uint32_t CurrentEmbeddedBlobCodeSize();
  static const uint8_t* CurrentEmbeddedBlobData();
  static uint32_t CurrentEmbeddedBlobDataSize();
  static bool CurrentEmbeddedBlobIsBinaryEmbedded();

  // These always return the same result as the static methods above, but don't
  // access the global atomic variable (and thus *might be* slightly faster).
  const uint8_t* embedded_blob_code() const;
  uint32_t embedded_blob_code_size() const;
  const uint8_t* embedded_blob_data() const;
  uint32_t embedded_blob_data_size() const;

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  void set_array_buffer_allocator_shared(
      std::shared_ptr<v8::ArrayBuffer::Allocator> allocator) {
    array_buffer_allocator_shared_ = std::move(allocator);
  }
  std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared()
      const {
    return array_buffer_allocator_shared_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  const AstStringConstants* ast_string_constants() const {
    return ast_string_constants_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  compiler::PerIsolateCompilerCache* compiler_cache() const {
    return compiler_cache_;
  }
  void set_compiler_utils(compiler::PerIsolateCompilerCache* cache,
                          Zone* zone) {
    compiler_cache_ = cache;
    compiler_zone_ = zone;
  }

  AccountingAllocator* allocator() { return allocator_; }

  CompilerDispatcher* compiler_dispatcher() const {
    return compiler_dispatcher_;
  }

  bool IsInAnyContext(Object object, uint32_t index);

  void ClearKeptObjects();

  void SetHostImportModuleDynamicallyCallback(
      HostImportModuleDynamicallyCallback callback);
  MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
      Handle<Script> referrer, Handle<Object> specifier);

  void SetHostInitializeImportMetaObjectCallback(
      HostInitializeImportMetaObjectCallback callback);
  MaybeHandle<JSObject> RunHostInitializeImportMetaObjectCallback(
      Handle<SourceTextModule> module);

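  // Illustrative sketch of the embedder side (hypothetical handler; the
  // actual registration goes through the public v8::Isolate API): dynamic
  // import() is resolved by whatever callback the embedder installed, e.g.
  //
  //   v8_isolate->SetHostImportModuleDynamicallyCallback(
  //       [](v8::Local<v8::Context> context,
  //          v8::Local<v8::ScriptOrModule> referrer,
  //          v8::Local<v8::String> specifier)
  //           -> v8::MaybeLocal<v8::Promise> {
  //         // Locate, compile, and instantiate the module, then return a
  //         // promise that resolves to its namespace object.
  //       });
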
  void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
    embedded_file_writer_ = writer;
  }

  int LookupOrAddExternallyCompiledFilename(const char* filename);
  const char* GetExternallyCompiledFilename(int index) const;
  int GetExternallyCompiledFilenameCount() const;
  // PrepareBuiltinSourcePositionMap is necessary to preserve the builtin
  // source positions before the corresponding code objects are replaced
  // with trampolines. Those source positions are used to annotate the
  // builtin blob with debugging information.
  void PrepareBuiltinSourcePositionMap();

  // Store the position of the labels that will be used in the list of allowed
  // return addresses.
  void PrepareBuiltinLabelInfoMap();

#if defined(V8_OS_WIN64)
  void SetBuiltinUnwindData(
      int builtin_index,
      const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info);
#endif  // V8_OS_WIN64

  void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
  MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
                                                   Handle<JSObject> Error,
                                                   Handle<JSArray> sites);
  bool HasPrepareStackTraceCallback() const;

  void SetAddCrashKeyCallback(AddCrashKeyCallback callback);
  void AddCrashKey(CrashKeyId id, const std::string& value) {
    if (add_crash_key_callback_) {
      add_crash_key_callback_(id, value);
    }
  }

  void SetRAILMode(RAILMode rail_mode);

  RAILMode rail_mode() { return rail_mode_.load(); }

  double LoadStartTimeMs();

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  void EnableMemorySavingsMode() { memory_savings_mode_active_ = true; }

  void DisableMemorySavingsMode() { memory_savings_mode_active_ = false; }

  bool IsMemorySavingsModeActive() { return memory_savings_mode_active_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
  bool allow_atomics_wait() { return allow_atomics_wait_; }

  // Register a finalizer to be called at isolate teardown.
  void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  // Removes a previously-registered shared object finalizer.
  void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  size_t elements_deletion_counter() { return elements_deletion_counter_; }
  void set_elements_deletion_counter(size_t value) {
    elements_deletion_counter_ = value;
  }

  wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
  void SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine);

  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
    return top_backup_incumbent_scope_;
  }
  void set_top_backup_incumbent_scope(
      const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
    top_backup_incumbent_scope_ = top_backup_incumbent_scope;
  }

  void SetIdle(bool is_idle);

  // Changing various modes can cause differences in generated bytecode which
  // interferes with lazy source positions, so this should be called immediately
  // before such a mode change to ensure that this cannot happen.
  void CollectSourcePositionsForAllBytecodeArrays();

  void AddCodeMemoryChunk(MemoryChunk* chunk);
  void RemoveCodeMemoryChunk(MemoryChunk* chunk);
  void AddCodeRange(Address begin, size_t length_in_bytes);

  bool RequiresCodeRange() const;

  static Address load_from_stack_count_address(const char* function_name);
  static Address store_to_stack_count_address(const char* function_name);

  v8::metrics::Recorder::ContextId GetOrRegisterRecorderContextId(
      Handle<NativeContext> context);
  MaybeLocal<v8::Context> GetContextFromRecorderContextId(
      v8::metrics::Recorder::ContextId id);

#ifdef V8_HEAP_SANDBOX
  ExternalPointerTable& external_pointer_table() {
    return isolate_data_.external_pointer_table_;
  }

  const ExternalPointerTable& external_pointer_table() const {
    return isolate_data_.external_pointer_table_;
  }

  Address external_pointer_table_address() {
    return reinterpret_cast<Address>(&isolate_data_.external_pointer_table_);
  }
#endif

 private:
  explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
  ~Isolate();

  bool Init(SnapshotData* startup_snapshot_data,
            SnapshotData* read_only_snapshot_data, bool can_rehash);

  void CheckIsolateLayout();

  void InitializeCodeRanges();
  void AddCodeMemoryRange(MemoryRange range);

  static void RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data);

  class ThreadDataTable {
   public:
    ThreadDataTable() = default;

    PerIsolateThreadData* Lookup(ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads();

   private:
    struct Hasher {
      std::size_t operator()(const ThreadId& t) const {
        return std::hash<int>()(t.ToInteger());
      }
    };

    std::unordered_map<ThreadId, PerIsolateThreadData*, Hasher> table_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item being pushed onto the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate, EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) {}

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };
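  // For example, if a thread calls Isolate::Enter() twice without an
  // intervening Exit(), the second Enter() only bumps entry_count to 2; the
  // isolate becomes disposable again only after both matching Exit() calls.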

  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;

#ifdef DEBUG
  static std::atomic<bool> isolate_key_created_;
#endif

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagates the pending exception message to the v8::TryCatch.
  // Returns true if there is no external try-catch or if the message was
  // successfully propagated.
  bool PropagatePendingExceptionToExternalTryCatch();

  void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
                                           Handle<JSPromise> promise);

  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    return "";
  }

  void AddCrashKeysForIsolateAndHeapPointers();

  // Returns the Exception sentinel.
  Object ThrowInternal(Object exception, MessageLocation* location);

  // This class contains a collection of data accessible from both C++ runtime
  // and compiled code (including assembly stubs, builtins, interpreter bytecode
  // handlers and optimized code).
  IsolateData isolate_data_;

  std::unique_ptr<IsolateAllocator> isolate_allocator_;
  Heap heap_;
  ReadOnlyHeap* read_only_heap_ = nullptr;
  std::shared_ptr<ReadOnlyArtifacts> artifacts_;
  std::unique_ptr<StringTable> string_table_;

  const int id_;
  EntryStackItem* entry_stack_ = nullptr;
  int stack_trace_nesting_level_ = 0;
  StringStream* incomplete_message_ = nullptr;
  Address isolate_addresses_[kIsolateAddressCount + 1] = {};
  Bootstrapper* bootstrapper_ = nullptr;
  RuntimeProfiler* runtime_profiler_ = nullptr;
  CompilationCache* compilation_cache_ = nullptr;
  std::shared_ptr<Counters> async_counters_;
  base::RecursiveMutex break_access_;
  base::SharedMutex feedback_vector_access_;
  base::SharedMutex string_access_;
  base::SharedMutex transition_array_access_;
  Logger* logger_ = nullptr;
  StubCache* load_stub_cache_ = nullptr;
  StubCache* store_stub_cache_ = nullptr;
  Deoptimizer* current_deoptimizer_ = nullptr;
  bool deoptimizer_lazy_throw_ = false;
  MaterializedObjectStore* materialized_object_store_ = nullptr;
  bool capture_stack_trace_for_uncaught_exceptions_ = false;
  int stack_trace_for_uncaught_exceptions_frame_limit_ = 0;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_ =
      StackTrace::kOverview;
  DescriptorLookupCache* descriptor_lookup_cache_ = nullptr;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_ = nullptr;
  UnicodeCache* unicode_cache_ = nullptr;
  AccountingAllocator* allocator_ = nullptr;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_ = nullptr;
  GlobalHandles* global_handles_ = nullptr;
  EternalHandles* eternal_handles_ = nullptr;
  ThreadManager* thread_manager_ = nullptr;
  RuntimeState runtime_state_;
  Builtins builtins_;
  SetupIsolateDelegate* setup_delegate_ = nullptr;
#if defined(DEBUG) || defined(VERIFY_HEAP)
  std::atomic<int> num_active_deserializers_;
#endif
#ifndef V8_INTL_SUPPORT
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
#endif  // !V8_INTL_SUPPORT
  RegExpStack* regexp_stack_ = nullptr;
  std::vector<int> regexp_indices_;
  DateCache* date_cache_ = nullptr;
  base::RandomNumberGenerator* random_number_generator_ = nullptr;
  base::RandomNumberGenerator* fuzzer_rng_ = nullptr;
  std::atomic<RAILMode> rail_mode_;
  v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
  void* atomics_wait_callback_data_ = nullptr;
  PromiseHook promise_hook_ = nullptr;
  HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
      nullptr;
  HostInitializeImportMetaObjectCallback
      host_initialize_import_meta_object_callback_ = nullptr;
  base::Mutex rail_mutex_;
  double load_start_time_ms_ = 0;

#ifdef V8_INTL_SUPPORT
  std::string default_locale_;

  struct ICUObjectCacheTypeHash {
    std::size_t operator()(ICUObjectCacheType a) const {
      return static_cast<std::size_t>(a);
    }
  };
  std::unordered_map<ICUObjectCacheType, std::shared_ptr<icu::UMemory>,
                     ICUObjectCacheTypeHash>
      icu_object_cache_;

#endif  // V8_INTL_SUPPORT

  // True if being profiled. Causes collection of extra compile info.
  std::atomic<bool> is_profiling_{false};

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_ = false;

  // True if fatal error has been signaled for this isolate.
  bool has_fatal_error_ = false;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_ = false;

  // TODO(ishell): remove
  // True if ES2015 tail call elimination feature is enabled.
  bool is_tail_call_elimination_enabled_ = true;

  // True if the isolate is in background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_ = false;

  // True if the isolate is in memory savings mode. This flag is used to
  // favor memory over runtime performance.
  bool memory_savings_mode_active_ = false;

  // Time stamp at initialization.
  double time_millis_at_init_ = 0;

#ifdef DEBUG
  static std::atomic<size_t> non_disposed_isolates_;

  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_ = nullptr;
  HeapProfiler* heap_profiler_ = nullptr;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;

  const AstStringConstants* ast_string_constants_ = nullptr;

  interpreter::Interpreter* interpreter_ = nullptr;

  compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
  // The following zone is for compiler-related objects that should live
  // through all compilations (and thus all JSHeapBroker instances).
  Zone* compiler_zone_ = nullptr;

  CompilerDispatcher* compiler_dispatcher_ = nullptr;

  using InterruptEntry = std::pair<InterruptCallback, void*>;
  std::queue<InterruptEntry> api_interrupts_queue_;

#define GLOBAL_BACKING_STORE(type, name, initialvalue) type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;

  std::unique_ptr<PersistentHandlesList> persistent_handles_list_;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_ = 0;

  bool force_slow_path_ = false;

  bool jitless_ = false;

  int next_optimization_id_ = 0;

#if V8_SFI_HAS_UNIQUE_ID
  std::atomic<int> next_unique_sfi_id_;
#endif

  // Vector of callbacks before a Call starts execution.
  std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // Vector of callbacks when a Call completes.
  std::vector<CallCompletedCallback> call_completed_callbacks_;

  v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;

  std::shared_ptr<metrics::Recorder> metrics_recorder_;
  uintptr_t last_recorder_context_id_ = 0;
  std::unordered_map<
      uintptr_t,
      Persistent<v8::Context, v8::CopyablePersistentTraits<v8::Context>>>
      recorder_context_id_map_;

  std::vector<Object> startup_object_cache_;

  // Used during builtins compilation to build the builtins constants table,
  // which is stored on the root list prior to serialization.
  BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;

  void InitializeDefaultEmbeddedBlob();
  void CreateAndSetEmbeddedBlob();
  void TearDownEmbeddedBlob();

  void SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
                       const uint8_t* data, uint32_t data_size);
  void ClearEmbeddedBlob();

  const uint8_t* embedded_blob_code_ = nullptr;
  uint32_t embedded_blob_code_size_ = 0;
  const uint8_t* embedded_blob_data_ = nullptr;
  uint32_t embedded_blob_data_size_ = 0;

  v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;
  std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared_;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_ = nullptr;

  debug::ConsoleDelegate* console_delegate_ = nullptr;

  debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
  bool promise_hook_or_async_event_delegate_ = false;
  bool promise_hook_or_debug_is_active_or_async_event_delegate_ = false;
  int async_task_count_ = 0;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_ = nullptr;

  bool allow_atomics_wait_ = true;

  base::Mutex managed_ptr_destructors_mutex_;
  ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;

  size_t total_regexp_code_generated_ = 0;

  size_t elements_deletion_counter_ = 0;

  std::shared_ptr<wasm::WasmEngine> wasm_engine_;

  std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;

  EmbeddedFileWriterInterface* embedded_file_writer_ = nullptr;

  // The top entry of the v8::Context::BackupIncumbentScope stack.
  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
      nullptr;

  PrepareStackTraceCallback prepare_stack_trace_callback_ = nullptr;

  // TODO(kenton@cloudflare.com): This mutex can be removed if
  // thread_data_table_ is always accessed under the isolate lock. I do not
  // know if this is the case, so I'm preserving it for now.
  base::Mutex thread_data_table_mutex_;
  ThreadDataTable thread_data_table_;

  // A signal-safe vector of heap pages containing code. Used with the
  // v8::Unwinder API.
  std::atomic<std::vector<MemoryRange>*> code_pages_{nullptr};
  std::vector<MemoryRange> code_pages_buffer1_;
  std::vector<MemoryRange> code_pages_buffer2_;

  // Enables the host application to provide a mechanism for recording a
  // predefined set of data as crash keys to be used in postmortem debugging
  // in case of a crash.
  AddCrashKeyCallback add_crash_key_callback_ = nullptr;

  // Delete new/delete operators to ensure that Isolate::New() and
  // Isolate::Delete() are used for Isolate creation and deletion.
  void* operator new(size_t, void* ptr) { return ptr; }

  friend class heap::HeapTester;
  friend class TestSerializer;

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};

#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR

class PromiseOnStack {
 public:
  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
      : promise_(promise), prev_(prev) {}
  Handle<JSObject> promise() { return promise_; }
  PromiseOnStack* prev() { return prev_; }

 private:
  Handle<JSObject> promise_;
  PromiseOnStack* prev_;
};

// SaveContext scopes save the current context on the Isolate on creation, and
// restore it on destruction.
class V8_EXPORT_PRIVATE SaveContext {
 public:
  explicit SaveContext(Isolate* isolate);

  ~SaveContext();

  Handle<Context> context() { return context_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(CommonFrame* frame);

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  Address c_entry_fp_;
};
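
// Illustrative sketch (hypothetical call site): SaveContext acts as a
// stack-allocated guard around code that may switch contexts:
//
//   {
//     SaveContext save(isolate);
//     isolate->set_context(other_context);
//     ...  // run code in |other_context|
//   }  // the previous context is restored here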

// Like SaveContext, but also switches the Context to a new one in the
// constructor.
class V8_EXPORT_PRIVATE SaveAndSwitchContext : public SaveContext {
 public:
  SaveAndSwitchContext(Isolate* isolate, Context new_context);
};

// A scope which sets the given isolate's context to null for its lifetime to
// ensure that code does not make assumptions on a context being available.
class NullContextScope : public SaveAndSwitchContext {
 public:
  explicit NullContextScope(Isolate* isolate)
      : SaveAndSwitchContext(isolate, Context()) {}
};
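
// Illustrative sketch (hypothetical call site): wrapping context-independent
// work in a NullContextScope makes any accidental context access fail loudly:
//
//   {
//     NullContextScope null_context_scope(isolate);
//     ...  // code here must not rely on isolate->context()
//   }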

class AssertNoContextChange {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() { DCHECK(isolate_->context() == *context_); }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) {}
#endif
};

class ExecutionAccess {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};

// Support for checking for stack-overflows.
class StackLimitCheck {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) {}

  // Use this to check for stack-overflows in C++ code.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }
  static bool HasOverflowed(LocalIsolate* local_isolate);

  // Use this to check for interrupt requests in C++ code.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }

  // Use this to check for stack-overflow when entering runtime from JS code.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};

#define STACK_CHECK(isolate, result_value) \
  do {                                     \
    StackLimitCheck stack_check(isolate);  \
    if (stack_check.HasOverflowed()) {     \
      isolate->StackOverflow();            \
      return result_value;                 \
    }                                      \
  } while (false)
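
// Illustrative sketch (hypothetical runtime function; real uses live in
// src/runtime/): a runtime entry point typically guards against stack
// overflow before doing any work:
//
//   RUNTIME_FUNCTION(Runtime_Example) {
//     HandleScope scope(isolate);
//     STACK_CHECK(isolate, ReadOnlyRoots(isolate).exception());
//     ...
//   }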

class StackTraceFailureMessage {
 public:
  explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
                                    void* ptr2 = nullptr, void* ptr3 = nullptr,
                                    void* ptr4 = nullptr);

  V8_NOINLINE void Print() volatile;

  static const uintptr_t kStartMarker = 0xdecade30;
  static const uintptr_t kEndMarker = 0xdecade31;
  static const int kStacktraceBufferSize = 32 * KB;

  uintptr_t start_marker_ = kStartMarker;
  void* isolate_;
  void* ptr1_;
  void* ptr2_;
  void* ptr3_;
  void* ptr4_;
  void* code_objects_[4];
  char js_stack_trace_[kStacktraceBufferSize];
  uintptr_t end_marker_ = kEndMarker;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_EXECUTION_ISOLATE_H_