1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_COMMON_GLOBALS_H_
6 #define V8_COMMON_GLOBALS_H_
7 
8 #include <stddef.h>
9 #include <stdint.h>
10 
11 #include <limits>
12 #include <ostream>
13 
14 #include "include/v8-internal.h"
15 #include "src/base/atomic-utils.h"
16 #include "src/base/build_config.h"
17 #include "src/base/enum-set.h"
18 #include "src/base/flags.h"
19 #include "src/base/logging.h"
20 #include "src/base/macros.h"
21 
22 #define V8_INFINITY std::numeric_limits<double>::infinity()
23 
24 namespace v8 {
25 
26 namespace base {
27 class Mutex;
28 class RecursiveMutex;
29 }  // namespace base
30 
31 namespace internal {
32 
33 // Determine whether we are running in a simulated environment.
34 // Setting USE_SIMULATOR explicitly from the build script will force
35 // the use of a simulated environment.
36 #if !defined(USE_SIMULATOR)
37 #if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
38 #define USE_SIMULATOR 1
39 #endif
40 #if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
41 #define USE_SIMULATOR 1
42 #endif
43 #if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
44 #define USE_SIMULATOR 1
45 #endif
46 #if (V8_TARGET_ARCH_PPC64 && !V8_HOST_ARCH_PPC64)
47 #define USE_SIMULATOR 1
48 #endif
49 #if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
50 #define USE_SIMULATOR 1
51 #endif
52 #if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
53 #define USE_SIMULATOR 1
54 #endif
55 #if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390)
56 #define USE_SIMULATOR 1
57 #endif
58 #if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
59 #define USE_SIMULATOR 1
60 #endif
61 #if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64)
62 #define USE_SIMULATOR 1
63 #endif
64 #endif
65 
66 // Determine whether the architecture uses an embedded constant pool
67 // (contiguous constant pool embedded in code object).
68 #if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
69 #define V8_EMBEDDED_CONSTANT_POOL true
70 #else
71 #define V8_EMBEDDED_CONSTANT_POOL false
72 #endif
73 
74 #if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
75 // Set stack limit lower for ARM and ARM64 than for other architectures because:
76 //  - on Arm stack allocating MacroAssembler takes 120K bytes.
77 //    See issue crbug.com/405338
78 //  - on Arm64 when running in single-process mode for Android WebView, when
79 //    initializing V8 we already have a large stack and so have to set the
80 //    limit lower. See issue crbug.com/v8/10575
81 #define V8_DEFAULT_STACK_SIZE_KB 864
82 #else
83 // Slightly less than 1MB, since Windows' default stack size for
84 // the main execution thread is 1MB for both 32 and 64-bit.
85 #define V8_DEFAULT_STACK_SIZE_KB 984
86 #endif
87 
88 // Helper macros to enable handling of direct C calls in the simulator.
89 #if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
90 #define V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
91 #define V8_IF_USE_SIMULATOR(V) , V
92 #else
93 #define V8_IF_USE_SIMULATOR(V)
94 #endif  // defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
95 
96 // Minimum stack size in KB required by compilers.
97 constexpr int kStackSpaceRequiredForCompilation = 40;
98 
99 // In order to emit more efficient stack checks in optimized code,
100 // deoptimization may implicitly exceed the V8 stack limit by this many bytes.
101 // Stack checks in functions with `difference between optimized and unoptimized
102 // stack frame sizes <= slack` can thus emit the simple stack check.
103 constexpr int kStackLimitSlackForDeoptimizationInBytes = 256;
104 
105 // Sanity-check, assuming that we aim for a real OS stack size of at least 1MB.
106 STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
107                   kStackLimitSlackForDeoptimizationInBytes <=
108               MB);
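// A worked instance of the assertion above (illustrative numbers only; the
// actual value of V8_DEFAULT_STACK_SIZE_KB depends on the target): with the
// ARM/ARM64 default of 864 KB, 864 * 1024 + 256 = 884,992 bytes, comfortably
// below the 1,048,576-byte (1 MB) budget.
static_assert(864 * KB + 256 <= MB,
              "example: ARM default stack size plus deopt slack fits in 1 MB");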
109 
110 // V8_ENABLE_NEAR_CODE_RANGE_BOOL enables logic that tries to allocate the
111 // code range within pc-relative call/jump distance of the embedded builtins.
112 // This machinery helps only when we actually have the opportunity to choose
113 // where to allocate the code range and can benefit from doing so. This is the
114 // case for the following configurations:
115 // - external code space AND pointer compression are enabled,
116 // - short builtin calls feature is enabled while pointer compression is not.
117 #if (defined(V8_SHORT_BUILTIN_CALLS) && !defined(V8_COMPRESS_POINTERS)) || \
118     defined(V8_EXTERNAL_CODE_SPACE)
119 #define V8_ENABLE_NEAR_CODE_RANGE_BOOL true
120 #else
121 #define V8_ENABLE_NEAR_CODE_RANGE_BOOL false
122 #endif
123 
124 // This constant is used for detecting whether the machine has >= 4GB of
125 // physical memory by checking the max old space size.
126 const size_t kShortBuiltinCallsOldSpaceSizeThreshold = size_t{2} * GB;
127 
128 // Determine whether the Swiss name dictionary (dict mode) feature is enabled.
129 #ifdef V8_ENABLE_SWISS_NAME_DICTIONARY
130 #define V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL true
131 #else
132 #define V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL false
133 #endif
134 
135 // Determine whether dict property constness tracking feature is enabled.
136 #ifdef V8_DICT_PROPERTY_CONST_TRACKING
137 #define V8_DICT_PROPERTY_CONST_TRACKING_BOOL true
138 #else
139 #define V8_DICT_PROPERTY_CONST_TRACKING_BOOL false
140 #endif
141 
142 #ifdef V8_EXTERNAL_CODE_SPACE
143 #define V8_EXTERNAL_CODE_SPACE_BOOL true
144 class CodeDataContainer;
145 using CodeT = CodeDataContainer;
146 #else
147 #define V8_EXTERNAL_CODE_SPACE_BOOL false
148 class Code;
149 using CodeT = Code;
150 #endif
151 
152 // Determine whether tagged pointers are 8 bytes (used in Torque layouts for
153 // choosing where to insert padding).
154 #if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
155 #define TAGGED_SIZE_8_BYTES true
156 #else
157 #define TAGGED_SIZE_8_BYTES false
158 #endif
159 
160 // Some types of tracing require the SFI to store a unique ID.
161 #if defined(V8_TRACE_MAPS) || defined(V8_TRACE_UNOPTIMIZED)
162 #define V8_SFI_HAS_UNIQUE_ID true
163 #else
164 #define V8_SFI_HAS_UNIQUE_ID false
165 #endif
166 
167 #if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_X64)
168 #define V8_OS_WIN_X64 true
169 #endif
170 
171 #if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
172 #define V8_OS_WIN_ARM64 true
173 #endif
174 
175 #if defined(V8_OS_WIN_X64) || defined(V8_OS_WIN_ARM64)
176 #define V8_OS_WIN64 true
177 #endif
178 
179 // Superclass for classes that only use static methods.
180 // Subclasses of AllStatic cannot be instantiated at all.
181 class AllStatic {
182 #ifdef DEBUG
183  public:
184   AllStatic() = delete;
185 #endif
186 };
187 
188 using byte = uint8_t;
189 
190 // -----------------------------------------------------------------------------
191 // Constants
192 
193 constexpr int kMaxInt = 0x7FFFFFFF;
194 constexpr int kMinInt = -kMaxInt - 1;
195 constexpr int kMaxInt8 = (1 << 7) - 1;
196 constexpr int kMinInt8 = -(1 << 7);
197 constexpr int kMaxUInt8 = (1 << 8) - 1;
198 constexpr int kMinUInt8 = 0;
199 constexpr int kMaxInt16 = (1 << 15) - 1;
200 constexpr int kMinInt16 = -(1 << 15);
201 constexpr int kMaxUInt16 = (1 << 16) - 1;
202 constexpr int kMinUInt16 = 0;
203 constexpr int kMaxInt31 = kMaxInt / 2;
204 constexpr int kMinInt31 = kMinInt / 2;
205 
206 constexpr uint32_t kMaxUInt32 = 0xFFFFFFFFu;
207 constexpr int kMinUInt32 = 0;
208 
209 constexpr int kInt8Size = sizeof(int8_t);
210 constexpr int kUInt8Size = sizeof(uint8_t);
211 constexpr int kByteSize = sizeof(byte);
212 constexpr int kCharSize = sizeof(char);
213 constexpr int kShortSize = sizeof(short);  // NOLINT
214 constexpr int kInt16Size = sizeof(int16_t);
215 constexpr int kUInt16Size = sizeof(uint16_t);
216 constexpr int kIntSize = sizeof(int);
217 constexpr int kInt32Size = sizeof(int32_t);
218 constexpr int kInt64Size = sizeof(int64_t);
219 constexpr int kUInt32Size = sizeof(uint32_t);
220 constexpr int kSizetSize = sizeof(size_t);
221 constexpr int kFloatSize = sizeof(float);
222 constexpr int kDoubleSize = sizeof(double);
223 constexpr int kIntptrSize = sizeof(intptr_t);
224 constexpr int kUIntptrSize = sizeof(uintptr_t);
225 constexpr int kSystemPointerSize = sizeof(void*);
226 constexpr int kSystemPointerHexDigits = kSystemPointerSize == 4 ? 8 : 12;
227 constexpr int kPCOnStackSize = kSystemPointerSize;
228 constexpr int kFPOnStackSize = kSystemPointerSize;
229 
230 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
231 constexpr int kElidedFrameSlots = kPCOnStackSize / kSystemPointerSize;
232 #else
233 constexpr int kElidedFrameSlots = 0;
234 #endif
235 
236 constexpr int kDoubleSizeLog2 = 3;
237 // The maximal length of the string representation for a double value
238 // (e.g. "-2.2250738585072020E-308"). It is composed as follows:
239 // - 17 decimal digits, see base::kBase10MaximalLength (dtoa.h)
240 // - 1 sign
241 // - 1 decimal point
242 // - 1 E or e
243 // - 1 exponent sign
244 // - 3 exponent
245 constexpr int kMaxDoubleStringLength = 24;
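// Sketch of the arithmetic behind the constant above: 1 (sign) + 17 (digits) +
// 1 (decimal point) + 1 ('e'/'E') + 1 (exponent sign) + 3 (exponent digits).
static_assert(1 + 17 + 1 + 1 + 1 + 3 == kMaxDoubleStringLength,
              "kMaxDoubleStringLength accounts for every character");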
246 
247 // Total wasm code space per engine (i.e. per process) is limited to make
248 // certain attacks that rely on heap spraying harder.
249 // Just below 4GB, such that {kMaxWasmCodeMemory} fits in a 32-bit size_t.
250 constexpr size_t kMaxWasmCodeMB = 4095;
251 constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
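// Sketch of the "just below 4GB" claim above: 4095 * MB is 4,293,918,720
// bytes, which still fits in a 32-bit size_t (maximum 4,294,967,295).
static_assert(kMaxWasmCodeMemory <= size_t{0xFFFFFFFF},
              "kMaxWasmCodeMemory must fit in a 32-bit size_t");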
252 
253 #if V8_HOST_ARCH_64_BIT
254 constexpr int kSystemPointerSizeLog2 = 3;
255 constexpr intptr_t kIntptrSignBit =
256     static_cast<intptr_t>(uintptr_t{0x8000000000000000});
257 constexpr bool kPlatformRequiresCodeRange = true;
258 #if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \
259     (V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64) && V8_OS_LINUX
260 constexpr size_t kMaximalCodeRangeSize = 512 * MB;
261 constexpr size_t kMinExpectedOSPageSize = 64 * KB;  // OS page on PPC Linux
262 #elif V8_TARGET_ARCH_ARM64
263 constexpr size_t kMaximalCodeRangeSize = 128 * MB;
264 constexpr size_t kMinExpectedOSPageSize = 4 * KB;  // OS page.
265 #else
266 constexpr size_t kMaximalCodeRangeSize = 128 * MB;
267 constexpr size_t kMinExpectedOSPageSize = 4 * KB;  // OS page.
268 #endif
269 #if V8_OS_WIN
270 constexpr size_t kMinimumCodeRangeSize = 4 * MB;
271 constexpr size_t kReservedCodeRangePages = 1;
272 #else
273 constexpr size_t kMinimumCodeRangeSize = 3 * MB;
274 constexpr size_t kReservedCodeRangePages = 0;
275 #endif
276 #else
277 constexpr int kSystemPointerSizeLog2 = 2;
278 constexpr intptr_t kIntptrSignBit = 0x80000000;
279 #if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \
280     (V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64) && V8_OS_LINUX
281 constexpr bool kPlatformRequiresCodeRange = false;
282 constexpr size_t kMaximalCodeRangeSize = 0 * MB;
283 constexpr size_t kMinimumCodeRangeSize = 0 * MB;
284 constexpr size_t kMinExpectedOSPageSize = 64 * KB;  // OS page on PPC Linux
285 #elif V8_TARGET_ARCH_MIPS
286 constexpr bool kPlatformRequiresCodeRange = false;
287 constexpr size_t kMaximalCodeRangeSize = 2048LL * MB;
288 constexpr size_t kMinimumCodeRangeSize = 0 * MB;
289 constexpr size_t kMinExpectedOSPageSize = 4 * KB;  // OS page.
290 #else
291 constexpr bool kPlatformRequiresCodeRange = false;
292 constexpr size_t kMaximalCodeRangeSize = 0 * MB;
293 constexpr size_t kMinimumCodeRangeSize = 0 * MB;
294 constexpr size_t kMinExpectedOSPageSize = 4 * KB;  // OS page.
295 #endif
296 constexpr size_t kReservedCodeRangePages = 0;
297 #endif
298 
299 STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));
300 
301 #ifdef V8_COMPRESS_ZONES
302 #define COMPRESS_ZONES_BOOL true
303 #else
304 #define COMPRESS_ZONES_BOOL false
305 #endif  // V8_COMPRESS_ZONES
306 
307 // The flag controls whether zones pointer compression should be enabled for
308 // TurboFan graphs or not.
309 static constexpr bool kCompressGraphZone = COMPRESS_ZONES_BOOL;
310 
311 #ifdef V8_COMPRESS_POINTERS
312 static_assert(
313     kSystemPointerSize == kInt64Size,
314     "Pointer compression can be enabled only for 64-bit architectures");
315 
316 constexpr int kTaggedSize = kInt32Size;
317 constexpr int kTaggedSizeLog2 = 2;
318 
319 // These types define raw and atomic storage types for tagged values stored
320 // on V8 heap.
321 using Tagged_t = uint32_t;
322 using AtomicTagged_t = base::Atomic32;
323 
324 #else
325 
326 constexpr int kTaggedSize = kSystemPointerSize;
327 constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
328 
329 // These types define raw and atomic storage types for tagged values stored
330 // on V8 heap.
331 using Tagged_t = Address;
332 using AtomicTagged_t = base::AtomicWord;
333 
334 #endif  // V8_COMPRESS_POINTERS
335 
336 STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
337 STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES);
338 
339 using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
340 STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
341 STATIC_ASSERT(sizeof(AtomicTagged_t) == kTaggedSize);
342 
343 STATIC_ASSERT(kTaggedSize == kApiTaggedSize);
344 
345 // TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
346 #ifndef V8_COMPRESS_POINTERS
347 constexpr int kPointerSize = kSystemPointerSize;
348 constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
349 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
350 #endif
351 
352 // This type defines raw storage type for external (or off-V8 heap) pointers
353 // stored on V8 heap.
354 constexpr int kExternalPointerSize = sizeof(ExternalPointer_t);
355 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
356 STATIC_ASSERT(kExternalPointerSize == kTaggedSize);
357 #else
358 STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
359 #endif
360 
361 constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
362 
363 constexpr int kEmbedderDataSlotSizeInTaggedSlots =
364     kEmbedderDataSlotSize / kTaggedSize;
365 STATIC_ASSERT(kEmbedderDataSlotSize >= kSystemPointerSize);
366 
367 constexpr int kExternalAllocationSoftLimit =
368     internal::Internals::kExternalAllocationSoftLimit;
369 
370 // Maximum object size that gets allocated into regular pages. Objects larger
371 // than that size are allocated in large object space and are never moved in
372 // memory. This also applies to new space allocation, since objects are never
373 // migrated from new space to large object space. Takes double alignment into
374 // account.
375 //
376 // Current value: half of the page size.
377 constexpr int kMaxRegularHeapObjectSize = (1 << (kPageSizeBits - 1));
378 
379 constexpr int kBitsPerByte = 8;
380 constexpr int kBitsPerByteLog2 = 3;
381 constexpr int kBitsPerSystemPointer = kSystemPointerSize * kBitsPerByte;
382 constexpr int kBitsPerSystemPointerLog2 =
383     kSystemPointerSizeLog2 + kBitsPerByteLog2;
384 constexpr int kBitsPerInt = kIntSize * kBitsPerByte;
385 
386 // IEEE 754 single precision floating point number bit layout.
387 constexpr uint32_t kBinary32SignMask = 0x80000000u;
388 constexpr uint32_t kBinary32ExponentMask = 0x7f800000u;
389 constexpr uint32_t kBinary32MantissaMask = 0x007fffffu;
390 constexpr int kBinary32ExponentBias = 127;
391 constexpr int kBinary32MaxExponent = 0xFE;
392 constexpr int kBinary32MinExponent = 0x01;
393 constexpr int kBinary32MantissaBits = 23;
394 constexpr int kBinary32ExponentShift = 23;
395 
396 // Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
397 // other bits set.
398 constexpr uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
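// For reference, the mask spelled out (a sketch of the shift above): 0xfff
// shifted left by 51 sets exactly bits 51..62, i.e. the full 11-bit exponent
// plus the topmost mantissa bit of an IEEE 754 double.
static_assert(kQuietNaNMask == uint64_t{0x7FF8000000000000},
              "kQuietNaNMask covers bits 51 through 62");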
399 
400 constexpr int kOneByteSize = kCharSize;
401 
402 // 128 bit SIMD value size.
403 constexpr int kSimd128Size = 16;
404 
405 // Maximum ordinal used for tracking asynchronous module evaluation order.
406 constexpr unsigned kMaxModuleAsyncEvaluatingOrdinal = (1 << 30) - 1;
407 
408 // FUNCTION_ADDR(f) gets the address of a C function f.
409 #define FUNCTION_ADDR(f) (reinterpret_cast<v8::internal::Address>(f))
410 
411 // FUNCTION_CAST<F>(addr) casts an address into a function
412 // of type F. Used to invoke generated code from within C.
413 template <typename F>
414 F FUNCTION_CAST(byte* addr) {
415   return reinterpret_cast<F>(reinterpret_cast<Address>(addr));
416 }
417 
418 template <typename F>
419 F FUNCTION_CAST(Address addr) {
420   return reinterpret_cast<F>(addr);
421 }
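// Minimal usage sketch (the function type and entry address here are
// hypothetical, for illustration only): generated code is typically entered
// by casting its entry address back to a C function pointer, e.g.
//
//   using GeneratedCall = int (*)(int);
//   Address entry = GetEntryOfSomeGeneratedCode();  // hypothetical
//   int result = FUNCTION_CAST<GeneratedCall>(entry)(42);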
422 
423 // Determine whether the architecture uses function descriptors
424 // which provide a level of indirection between the function pointer
425 // and the function entrypoint.
426 #if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) &&                    \
427     (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
428                    (!defined(_CALL_ELF) || _CALL_ELF == 1)))
429 #define USES_FUNCTION_DESCRIPTORS 1
430 #define FUNCTION_ENTRYPOINT_ADDRESS(f)       \
431   (reinterpret_cast<v8::internal::Address*>( \
432       &(reinterpret_cast<intptr_t*>(f)[0])))
433 #else
434 #define USES_FUNCTION_DESCRIPTORS 0
435 #endif
436 
437 constexpr bool StaticStringsEqual(const char* s1, const char* s2) {
438   for (;; ++s1, ++s2) {
439     if (*s1 != *s2) return false;
440     if (*s1 == '\0') return true;
441   }
442 }
443 
444 // -----------------------------------------------------------------------------
445 // Declarations for use in both the preparser and the rest of V8.
446 
447 // The Strict Mode (ECMA-262 5th edition, 4.2.2).
448 
449 enum class LanguageMode : bool { kSloppy, kStrict };
450 static const size_t LanguageModeSize = 2;
451 
452 inline size_t hash_value(LanguageMode mode) {
453   return static_cast<size_t>(mode);
454 }
455 
456 inline const char* LanguageMode2String(LanguageMode mode) {
457   switch (mode) {
458     case LanguageMode::kSloppy:
459       return "sloppy";
460     case LanguageMode::kStrict:
461       return "strict";
462   }
463   UNREACHABLE();
464 }
465 
466 inline std::ostream& operator<<(std::ostream& os, LanguageMode mode) {
467   return os << LanguageMode2String(mode);
468 }
469 
470 inline bool is_sloppy(LanguageMode language_mode) {
471   return language_mode == LanguageMode::kSloppy;
472 }
473 
474 inline bool is_strict(LanguageMode language_mode) {
475   return language_mode != LanguageMode::kSloppy;
476 }
477 
478 inline bool is_valid_language_mode(int language_mode) {
479   return language_mode == static_cast<int>(LanguageMode::kSloppy) ||
480          language_mode == static_cast<int>(LanguageMode::kStrict);
481 }
482 
483 inline LanguageMode construct_language_mode(bool strict_bit) {
484   return static_cast<LanguageMode>(strict_bit);
485 }
486 
487 // Return kStrict if either of the language modes is kStrict, or kSloppy
488 // otherwise.
489 inline LanguageMode stricter_language_mode(LanguageMode mode1,
490                                            LanguageMode mode2) {
491   STATIC_ASSERT(LanguageModeSize == 2);
492   return static_cast<LanguageMode>(static_cast<int>(mode1) |
493                                    static_cast<int>(mode2));
494 }
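// Sketch of why the bitwise OR above is sufficient: kSloppy is encoded as 0
// and kStrict as 1, so OR-ing two modes yields kStrict exactly when at least
// one operand is strict.
static_assert(static_cast<int>(LanguageMode::kSloppy) == 0 &&
                  static_cast<int>(LanguageMode::kStrict) == 1,
              "stricter_language_mode relies on this encoding");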
495 
496 // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
497 // a keyed store is of the form a[expression] = foo.
498 enum class StoreOrigin { kMaybeKeyed, kNamed };
499 
500 enum class TypeofMode { kInside, kNotInside };
501 
502 // Used by RecordWrite stubs.
503 enum class RememberedSetAction { kOmit, kEmit };
504 // Enums used by CEntry.
505 enum class SaveFPRegsMode { kIgnore, kSave };
506 enum class ArgvMode { kStack, kRegister };
507 
508 // This constant is used as an undefined value when passing source positions.
509 constexpr int kNoSourcePosition = -1;
510 
511 // This constant is used to signal the function entry implicit stack check
512 // bytecode offset.
513 constexpr int kFunctionEntryBytecodeOffset = -1;
514 
515 // This constant is used to signal the function exit interrupt budget handling
516 // bytecode offset.
517 constexpr int kFunctionExitBytecodeOffset = -1;
518 
519 // This constant is used to indicate missing deoptimization information.
520 constexpr int kNoDeoptimizationId = -1;
521 
522 // Deoptimize bailout kind:
523 // - Eager: a check failed in the optimized code and deoptimization happens
524 //   immediately.
525 // - Unused: replaces "Soft" for ABI compatibility.
526 // - Lazy: the code has been marked as dependent on some assumption which
527 //   is checked elsewhere and can trigger deoptimization the next time the
528 //   code is executed.
529 enum class DeoptimizeKind : uint8_t {
530   kEager,
531   kUnused,
532   kLazy,
533 };
534 constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
535 constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
536 STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
537 constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
538 inline size_t hash_value(DeoptimizeKind kind) {
539   return static_cast<size_t>(kind);
540 }
541 constexpr const char* ToString(DeoptimizeKind kind) {
542   switch (kind) {
543     case DeoptimizeKind::kEager:
544       return "Eager";
545     case DeoptimizeKind::kUnused:
546       return "Unused";
547     case DeoptimizeKind::kLazy:
548       return "Lazy";
549   }
550 }
551 inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
552   return os << ToString(kind);
553 }
554 
555 // Indicates whether the lookup is related to sloppy-mode block-scoped
556 // function hoisting, and is a synthetic assignment for that.
557 enum class LookupHoistingMode { kNormal, kLegacySloppy };
558 
559 inline std::ostream& operator<<(std::ostream& os,
560                                 const LookupHoistingMode& mode) {
561   switch (mode) {
562     case LookupHoistingMode::kNormal:
563       return os << "normal hoisting";
564     case LookupHoistingMode::kLegacySloppy:
565       return os << "legacy sloppy hoisting";
566   }
567   UNREACHABLE();
568 }
569 
570 static_assert(kSmiValueSize <= 32, "Unsupported Smi tagging scheme");
571 // Smi sign bit position must be 32-bit aligned so we can use sign extension
572 // instructions on 64-bit architectures without additional shifts.
573 static_assert((kSmiValueSize + kSmiShiftSize + kSmiTagSize) % 32 == 0,
574               "Unsupported Smi tagging scheme");
575 
576 constexpr bool kIsSmiValueInUpper32Bits =
577     (kSmiValueSize + kSmiShiftSize + kSmiTagSize) == 64;
578 constexpr bool kIsSmiValueInLower32Bits =
579     (kSmiValueSize + kSmiShiftSize + kSmiTagSize) == 32;
580 static_assert(!SmiValuesAre32Bits() == SmiValuesAre31Bits(),
581               "Unsupported Smi tagging scheme");
582 static_assert(SmiValuesAre32Bits() == kIsSmiValueInUpper32Bits,
583               "Unsupported Smi tagging scheme");
584 static_assert(SmiValuesAre31Bits() == kIsSmiValueInLower32Bits,
585               "Unsupported Smi tagging scheme");
586 
587 // Mask for the sign bit in a smi.
588 constexpr intptr_t kSmiSignMask = static_cast<intptr_t>(
589     uintptr_t{1} << (kSmiValueSize + kSmiShiftSize + kSmiTagSize - 1));
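// Illustrative helper (hypothetical, not used anywhere in V8): how an integer
// payload is placed above the shift and tag bits that the asserts above
// constrain. This mirrors the Smi encoding declared in include/v8-internal.h.
constexpr Address IllustrateSmiEncoding(int value) {
  return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
         kSmiTag;
}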
590 
591 // Desired alignment for tagged pointers.
592 constexpr int kObjectAlignmentBits = kTaggedSizeLog2;
593 constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
594 constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
595 
596 // Desired alignment for system pointers.
597 constexpr intptr_t kPointerAlignment = (1 << kSystemPointerSizeLog2);
598 constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
599 
600 // Desired alignment for double values.
601 constexpr intptr_t kDoubleAlignment = 8;
602 constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
603 
604 // Desired alignment for generated code is 64 bytes on x64 (to allow 64-byte
605 // loop header alignment) and 32 bytes (to improve cache line utilization) on
606 // other architectures.
607 #if V8_TARGET_ARCH_X64
608 constexpr int kCodeAlignmentBits = 6;
609 #elif V8_TARGET_ARCH_PPC64
610 // 64 byte alignment is needed on ppc64 to make sure p10 prefixed instructions
611 // don't cross 64-byte boundaries.
612 constexpr int kCodeAlignmentBits = 6;
613 #else
614 constexpr int kCodeAlignmentBits = 5;
615 #endif
616 constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
617 constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
618 
619 const Address kWeakHeapObjectMask = 1 << 1;
620 
621 // The lower 32 bits of the cleared weak reference value are always equal to
622 // the |kClearedWeakHeapObjectLower32| constant, but on 64-bit architectures
623 // the upper 32 bits may be
624 // 1) zero when pointer compression is disabled, or
625 // 2) the upper 32 bits of the isolate root value when pointer compression is
626 //    enabled.
627 // This is necessary to make the pointer decompression computation also
628 // suitable for cleared weak references.
629 // Note that real heap objects can't have their lower 32 bits equal to 3,
630 // because that offset belongs to the page header. So, in either case it is
631 // enough to compare only the lower 32 bits of a MaybeObject value in order
632 // to figure out whether it is a cleared reference or not.
633 const uint32_t kClearedWeakHeapObjectLower32 = 3;
634 
635 // Zap-value: The value used for zapping dead objects.
636 // Should be a recognizable hex value tagged as a failure.
637 #ifdef V8_HOST_ARCH_64_BIT
638 constexpr uint64_t kClearedFreeMemoryValue = 0;
639 constexpr uint64_t kZapValue = uint64_t{0xdeadbeedbeadbeef};
640 constexpr uint64_t kHandleZapValue = uint64_t{0x1baddead0baddeaf};
641 constexpr uint64_t kGlobalHandleZapValue = uint64_t{0x1baffed00baffedf};
642 constexpr uint64_t kFromSpaceZapValue = uint64_t{0x1beefdad0beefdaf};
643 constexpr uint64_t kDebugZapValue = uint64_t{0xbadbaddbbadbaddb};
644 constexpr uint64_t kSlotsZapValue = uint64_t{0xbeefdeadbeefdeef};
645 constexpr uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
646 #else
647 constexpr uint32_t kClearedFreeMemoryValue = 0;
648 constexpr uint32_t kZapValue = 0xdeadbeef;
649 constexpr uint32_t kHandleZapValue = 0xbaddeaf;
650 constexpr uint32_t kGlobalHandleZapValue = 0xbaffedf;
651 constexpr uint32_t kFromSpaceZapValue = 0xbeefdaf;
652 constexpr uint32_t kSlotsZapValue = 0xbeefdeef;
653 constexpr uint32_t kDebugZapValue = 0xbadbaddb;
654 constexpr uint32_t kFreeListZapValue = 0xfeed1eaf;
655 #endif
656 
657 constexpr int kCodeZapValue = 0xbadc0de;
658 constexpr uint32_t kPhantomReferenceZap = 0xca11bac;
659 
660 // Page constants.
661 static const intptr_t kPageAlignmentMask = (intptr_t{1} << kPageSizeBits) - 1;
662 
663 // On Intel architecture, cache line size is 64 bytes.
664 // On ARM it may be less (32 bytes), but as far as this constant is
665 // used for aligning data, it doesn't hurt to align on a greater value.
666 #define PROCESSOR_CACHE_LINE_SIZE 64
667 
668 // Constants relevant to double precision floating point numbers.
669 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
670 constexpr uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
671 
672 enum class HeapObjectReferenceType {
673   WEAK,
674   STRONG,
675 };
676 
677 enum class ArgumentsType {
678   kRuntime,
679   kJS,
680 };
681 
682 // -----------------------------------------------------------------------------
683 // Forward declarations for frequently used classes
684 
685 class AccessorInfo;
686 template <ArgumentsType>
687 class Arguments;
688 using RuntimeArguments = Arguments<ArgumentsType::kRuntime>;
689 using JavaScriptArguments = Arguments<ArgumentsType::kJS>;
690 class Assembler;
691 class ClassScope;
692 class Code;
693 class CodeDataContainer;
694 class CodeSpace;
695 class Context;
696 class DeclarationScope;
697 class Debug;
698 class DebugInfo;
699 class Descriptor;
700 class DescriptorArray;
701 class TransitionArray;
702 class ExternalReference;
703 class FeedbackVector;
704 class FixedArray;
705 class Foreign;
706 class FreeStoreAllocationPolicy;
707 class FunctionTemplateInfo;
708 class GlobalDictionary;
709 template <typename T>
710 class Handle;
711 class Heap;
712 class HeapObject;
713 class HeapObjectReference;
714 class IC;
715 class InterceptorInfo;
716 class Isolate;
717 class JSReceiver;
718 class JSArray;
719 class JSFunction;
720 class JSObject;
721 class LocalIsolate;
722 class MacroAssembler;
723 class Map;
724 class MapSpace;
725 class MarkCompactCollector;
726 template <typename T>
727 class MaybeHandle;
728 class MaybeObject;
729 class MemoryChunk;
730 class MessageLocation;
731 class ModuleScope;
732 class Name;
733 class NameDictionary;
734 class NativeContext;
735 class NewSpace;
736 class NewLargeObjectSpace;
737 class NumberDictionary;
738 class Object;
739 class OldLargeObjectSpace;
740 template <HeapObjectReferenceType kRefType, typename StorageType>
741 class TaggedImpl;
742 class StrongTaggedValue;
743 class TaggedValue;
744 class CompressedObjectSlot;
745 class CompressedMaybeObjectSlot;
746 class CompressedMapWordSlot;
747 class CompressedHeapObjectSlot;
748 class OffHeapCompressedObjectSlot;
749 class FullObjectSlot;
750 class FullMaybeObjectSlot;
751 class FullHeapObjectSlot;
752 class OffHeapFullObjectSlot;
753 class OldSpace;
754 class ReadOnlySpace;
755 class RelocInfo;
756 class Scope;
757 class ScopeInfo;
758 class Script;
759 class SimpleNumberDictionary;
760 class Smi;
761 template <typename Config, class Allocator = FreeStoreAllocationPolicy>
762 class SplayTree;
763 class String;
764 class StringStream;
765 class Struct;
766 class Symbol;
767 class Variable;
768 
769 // Slots are either full-pointer slots or compressed slots depending on whether
770 // pointer compression is enabled or not.
771 struct SlotTraits {
772 #ifdef V8_COMPRESS_POINTERS
773   using TObjectSlot = CompressedObjectSlot;
774   using TMaybeObjectSlot = CompressedMaybeObjectSlot;
775   using THeapObjectSlot = CompressedHeapObjectSlot;
776   using TOffHeapObjectSlot = OffHeapCompressedObjectSlot;
777   using TCodeObjectSlot = OffHeapCompressedObjectSlot;
778 #else
779   using TObjectSlot = FullObjectSlot;
780   using TMaybeObjectSlot = FullMaybeObjectSlot;
781   using THeapObjectSlot = FullHeapObjectSlot;
782   using TOffHeapObjectSlot = OffHeapFullObjectSlot;
783   using TCodeObjectSlot = OffHeapFullObjectSlot;
784 #endif
785 };
786 
787 // An ObjectSlot instance describes a kTaggedSize-sized on-heap field ("slot")
788 // holding an Object value (smi or strong heap object).
789 using ObjectSlot = SlotTraits::TObjectSlot;
790 
791 // A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field
792 // ("slot") holding MaybeObject (smi or weak heap object or strong heap object).
793 using MaybeObjectSlot = SlotTraits::TMaybeObjectSlot;
794 
795 // A HeapObjectSlot instance describes a kTaggedSize-sized field ("slot")
796 // holding a weak or strong pointer to a heap object (think:
797 // HeapObjectReference).
798 using HeapObjectSlot = SlotTraits::THeapObjectSlot;
799 
800 // An OffHeapObjectSlot instance describes a kTaggedSize-sized field ("slot")
801 // holding an Object value (smi or strong heap object), whose slot location is
802 // off-heap.
803 using OffHeapObjectSlot = SlotTraits::TOffHeapObjectSlot;
804 
805 // A CodeObjectSlot instance describes a kTaggedSize-sized field ("slot")
806 // holding a strong pointer to a Code object. The Code object slots might be
807 // compressed and since code space might be allocated off the main heap
808 // the load operations require explicit cage base value for code space.
809 using CodeObjectSlot = SlotTraits::TCodeObjectSlot;
810 
811 using WeakSlotCallback = bool (*)(FullObjectSlot pointer);
812 
813 using WeakSlotCallbackWithHeap = bool (*)(Heap* heap, FullObjectSlot pointer);
814 
815 // -----------------------------------------------------------------------------
816 // Miscellaneous
817 
818 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
819 // consecutive.
820 enum AllocationSpace {
821   RO_SPACE,       // Immortal, immovable and immutable objects,
822   OLD_SPACE,      // Old generation regular object space.
823   CODE_SPACE,     // Old generation code object space, marked executable.
824   MAP_SPACE,      // Old generation map object space, non-movable.
825   LO_SPACE,       // Old generation large object space.
826   CODE_LO_SPACE,  // Old generation large code object space.
827   NEW_LO_SPACE,   // Young generation large object space.
828   NEW_SPACE,  // Young generation semispaces for regular objects collected with
829               // Scavenger.
830 
831   FIRST_SPACE = RO_SPACE,
832   LAST_SPACE = NEW_SPACE,
833   FIRST_MUTABLE_SPACE = OLD_SPACE,
834   LAST_MUTABLE_SPACE = NEW_SPACE,
835   FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
836   LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
837 };
838 constexpr int kSpaceTagSize = 4;
839 STATIC_ASSERT(FIRST_SPACE == 0);
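// Sketch: the eight space ids above (RO_SPACE .. NEW_SPACE) are 0..7, so they
// fit comfortably in the kSpaceTagSize == 4 bits reserved for a space tag.
static_assert(LAST_SPACE < (1 << kSpaceTagSize),
              "AllocationSpace ids fit in kSpaceTagSize bits");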
840 
841 enum class AllocationType : uint8_t {
842   kYoung,      // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
843   kOld,        // Regular object allocated in OLD_SPACE or LO_SPACE
844   kCode,       // Code object allocated in CODE_SPACE or CODE_LO_SPACE
845   kMap,        // Map object allocated in MAP_SPACE
846   kReadOnly,   // Object allocated in RO_SPACE
847   kSharedOld,  // Regular object allocated in OLD_SPACE in the shared heap
848   kSharedMap,  // Map object in MAP_SPACE in the shared heap
849 };
850 
851 inline size_t hash_value(AllocationType kind) {
852   return static_cast<uint8_t>(kind);
853 }
854 
855 inline std::ostream& operator<<(std::ostream& os, AllocationType kind) {
856   switch (kind) {
857     case AllocationType::kYoung:
858       return os << "Young";
859     case AllocationType::kOld:
860       return os << "Old";
861     case AllocationType::kCode:
862       return os << "Code";
863     case AllocationType::kMap:
864       return os << "Map";
865     case AllocationType::kReadOnly:
866       return os << "ReadOnly";
867     case AllocationType::kSharedOld:
868       return os << "SharedOld";
869     case AllocationType::kSharedMap:
870       return os << "SharedMap";
871   }
872   UNREACHABLE();
873 }
874 
875 inline constexpr bool IsSharedAllocationType(AllocationType kind) {
876   return kind == AllocationType::kSharedOld ||
877          kind == AllocationType::kSharedMap;
878 }
879 
880 enum AllocationAlignment {
881   // The allocated address is kTaggedSize aligned (this is default for most of
882   // the allocations).
883   kTaggedAligned,
884   // The allocated address is kDoubleSize aligned.
885   kDoubleAligned,
886   // The (allocated address + kTaggedSize) is kDoubleSize aligned.
887   kDoubleUnaligned
888 };
889 
890 #ifdef V8_HOST_ARCH_32_BIT
891 #define USE_ALLOCATION_ALIGNMENT_BOOL true
892 #else
893 #ifdef V8_COMPRESS_POINTERS
894 // TODO(ishell, v8:8875): Consider using aligned allocations once the
895 // allocation alignment inconsistency is fixed. For now we keep using
896 // unaligned access since both x64 and arm64 architectures (where pointer
897 // compression is supported) allow unaligned access to doubles and full words.
898 #endif  // V8_COMPRESS_POINTERS
899 #define USE_ALLOCATION_ALIGNMENT_BOOL false
900 #endif  // V8_HOST_ARCH_32_BIT
901 
902 enum class AccessMode { ATOMIC, NON_ATOMIC };
903 
904 enum class AllowLargeObjects { kFalse, kTrue };
905 
906 enum MinimumCapacity {
907   USE_DEFAULT_MINIMUM_CAPACITY,
908   USE_CUSTOM_MINIMUM_CAPACITY
909 };
910 
911 enum class GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
912 
913 enum class CompactionSpaceKind {
914   kNone,
915   kCompactionSpaceForScavenge,
916   kCompactionSpaceForMarkCompact,
917   kCompactionSpaceForMinorMarkCompact,
918 };
919 
920 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
921 
922 enum class PageSize { kRegular, kLarge };
923 
924 enum class CodeFlushMode {
925   kFlushBytecode,
926   kFlushBaselineCode,
927   kStressFlushCode,
928 };
929 
930 bool inline IsBaselineCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
931   return mode.contains(CodeFlushMode::kFlushBaselineCode);
932 }
933 
934 bool inline IsByteCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
935   return mode.contains(CodeFlushMode::kFlushBytecode);
936 }
937 
938 bool inline IsStressFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
939   return mode.contains(CodeFlushMode::kStressFlushCode);
940 }
941 
942 bool inline IsFlushingDisabled(base::EnumSet<CodeFlushMode> mode) {
943   return mode.empty();
944 }
945 
946 // Indicates whether a script should be parsed and compiled in REPL mode.
947 enum class REPLMode {
948   kYes,
949   kNo,
950 };
951 
952 inline REPLMode construct_repl_mode(bool is_repl_mode) {
953   return is_repl_mode ? REPLMode::kYes : REPLMode::kNo;
954 }
955 
956 // Indicates whether a script is parsed during debugging.
957 enum class ParsingWhileDebugging {
958   kYes,
959   kNo,
960 };
961 
962 // Flag indicating whether code is built into the VM (one of the natives files).
963 enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, INSPECTOR_CODE };
964 
965 // ParseRestriction is used to restrict the set of valid statements in a
966 // unit of compilation.  Restriction violations cause a syntax error.
967 enum ParseRestriction : bool {
968   NO_PARSE_RESTRICTION,         // All expressions are allowed.
969   ONLY_SINGLE_FUNCTION_LITERAL  // Only a single FunctionLiteral expression.
970 };
971 
972 // State for inline cache call sites. Aliased as IC::State.
973 enum class InlineCacheState {
974   // No feedback will be collected.
975   NO_FEEDBACK,
976   // Has never been executed.
977   UNINITIALIZED,
978   // Has been executed and only one receiver type has been seen.
979   MONOMORPHIC,
980   // Check failed due to prototype (or map deprecation).
981   RECOMPUTE_HANDLER,
982   // Multiple receiver types have been seen.
983   POLYMORPHIC,
984   // Many DOM receiver types have been seen for the same accessor.
985   MEGADOM,
986   // Many receiver types have been seen.
987   MEGAMORPHIC,
988   // A generic handler is installed and no extra typefeedback is recorded.
989   GENERIC,
990 };
991 
992 inline size_t hash_value(InlineCacheState mode) { return bit_cast<int>(mode); }
993 
994 // Printing support.
995 inline const char* InlineCacheState2String(InlineCacheState state) {
996   switch (state) {
997     case InlineCacheState::NO_FEEDBACK:
998       return "NOFEEDBACK";
999     case InlineCacheState::UNINITIALIZED:
1000       return "UNINITIALIZED";
1001     case InlineCacheState::MONOMORPHIC:
1002       return "MONOMORPHIC";
1003     case InlineCacheState::RECOMPUTE_HANDLER:
1004       return "RECOMPUTE_HANDLER";
1005     case InlineCacheState::POLYMORPHIC:
1006       return "POLYMORPHIC";
1007     case InlineCacheState::MEGAMORPHIC:
1008       return "MEGAMORPHIC";
1009     case InlineCacheState::MEGADOM:
1010       return "MEGADOM";
1011     case InlineCacheState::GENERIC:
1012       return "GENERIC";
1013   }
1014   UNREACHABLE();
1015 }
1016 
1017 enum WhereToStart { kStartAtReceiver, kStartAtPrototype };
1018 
1019 enum ResultSentinel { kNotFound = -1, kUnsupported = -2 };
1020 
1021 enum ShouldThrow {
1022   kThrowOnError = Internals::kThrowOnError,
1023   kDontThrow = Internals::kDontThrow
1024 };
1025 
1026 enum class ThreadKind { kMain, kBackground };
1027 
1028 // Unions used for custom checking of IEEE double values inlined within the
1029 // V8 runtime, rather than relying on the underlying platform headers and
1030 // libraries.
1031 union IeeeDoubleLittleEndianArchType {
1032   double d;
1033   struct {
1034     unsigned int man_low : 32;
1035     unsigned int man_high : 20;
1036     unsigned int exp : 11;
1037     unsigned int sign : 1;
1038   } bits;
1039 };
1040 
1041 union IeeeDoubleBigEndianArchType {
1042   double d;
1043   struct {
1044     unsigned int sign : 1;
1045     unsigned int exp : 11;
1046     unsigned int man_high : 20;
1047     unsigned int man_low : 32;
1048   } bits;
1049 };
1050 
1051 #if V8_TARGET_LITTLE_ENDIAN
1052 using IeeeDoubleArchType = IeeeDoubleLittleEndianArchType;
1053 constexpr int kIeeeDoubleMantissaWordOffset = 0;
1054 constexpr int kIeeeDoubleExponentWordOffset = 4;
1055 #else
1056 using IeeeDoubleArchType = IeeeDoubleBigEndianArchType;
1057 constexpr int kIeeeDoubleMantissaWordOffset = 4;
1058 constexpr int kIeeeDoubleExponentWordOffset = 0;
1059 #endif
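// Minimal usage sketch (hypothetical helper, for illustration only): the
// unions above let runtime code read IEEE 754 fields without manual shifting.
inline int IllustrateExtractExponent(double d) {
  IeeeDoubleArchType u;
  u.d = d;
  return static_cast<int>(u.bits.exp);  // biased exponent, 0..2047
}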
1060 
1061 // -----------------------------------------------------------------------------
1062 // Macros
1063 
1064 // Testers for tag bits.
1065 
1066 #define HAS_SMI_TAG(value) \
1067   ((static_cast<i::Tagged_t>(value) & ::i::kSmiTagMask) == ::i::kSmiTag)
1068 
1069 #define HAS_STRONG_HEAP_OBJECT_TAG(value)                          \
1070   (((static_cast<i::Tagged_t>(value) & ::i::kHeapObjectTagMask) == \
1071     ::i::kHeapObjectTag))
1072 
1073 #define HAS_WEAK_HEAP_OBJECT_TAG(value)                            \
1074   (((static_cast<i::Tagged_t>(value) & ::i::kHeapObjectTagMask) == \
1075     ::i::kWeakHeapObjectTag))
1076 
1077 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
1078 #define OBJECT_POINTER_ALIGN(value) \
1079   (((value) + ::i::kObjectAlignmentMask) & ~::i::kObjectAlignmentMask)
1080 
1081 // OBJECT_POINTER_PADDING returns the padding size required to align value
1082 // as a HeapObject pointer
1083 #define OBJECT_POINTER_PADDING(value) (OBJECT_POINTER_ALIGN(value) - (value))
1084 
1085 // POINTER_SIZE_ALIGN returns the value aligned as a system pointer.
1086 #define POINTER_SIZE_ALIGN(value) \
1087   (((value) + ::i::kPointerAlignmentMask) & ~::i::kPointerAlignmentMask)
1088 
1089 // POINTER_SIZE_PADDING returns the padding size required to align value
1090 // as a system pointer.
1091 #define POINTER_SIZE_PADDING(value) (POINTER_SIZE_ALIGN(value) - (value))
1092 
1093 // CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
1094 #define CODE_POINTER_ALIGN(value) \
1095   (((value) + ::i::kCodeAlignmentMask) & ~::i::kCodeAlignmentMask)
1096 
1097 // CODE_POINTER_PADDING returns the padding size required to align value
1098 // as a generated code segment.
1099 #define CODE_POINTER_PADDING(value) (CODE_POINTER_ALIGN(value) - (value))
1100 
1101 // DOUBLE_POINTER_ALIGN returns the value aligned for double pointers.
1102 #define DOUBLE_POINTER_ALIGN(value) \
1103   (((value) + ::i::kDoubleAlignmentMask) & ~::i::kDoubleAlignmentMask)
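// Worked example of the align/padding pattern used by the macros above
// (concrete numbers only, independent of the configured alignments): rounding
// 13 up to an 8-byte boundary gives 16, so 3 bytes of padding are required.
static_assert(((13 + 7) & ~7) == 16, "13 aligned up to 8 is 16");
static_assert((((13 + 7) & ~7) - 13) == 3, "which implies 3 bytes of padding");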
1104 
1105 // Defines hints about receiver values based on structural knowledge.
1106 enum class ConvertReceiverMode : unsigned {
1107   kNullOrUndefined,     // Guaranteed to be null or undefined.
1108   kNotNullOrUndefined,  // Guaranteed to never be null or undefined.
1109   kAny                  // No specific knowledge about receiver.
1110 };
1111 
1112 inline size_t hash_value(ConvertReceiverMode mode) {
1113   return bit_cast<unsigned>(mode);
1114 }
1115 
1116 inline std::ostream& operator<<(std::ostream& os, ConvertReceiverMode mode) {
1117   switch (mode) {
1118     case ConvertReceiverMode::kNullOrUndefined:
1119       return os << "NULL_OR_UNDEFINED";
1120     case ConvertReceiverMode::kNotNullOrUndefined:
1121       return os << "NOT_NULL_OR_UNDEFINED";
1122     case ConvertReceiverMode::kAny:
1123       return os << "ANY";
1124   }
1125   UNREACHABLE();
1126 }
1127 
1128 // Valid hints for the abstract operation OrdinaryToPrimitive,
1129 // implemented according to ES6, section 7.1.1.
1130 enum class OrdinaryToPrimitiveHint { kNumber, kString };
1131 
1132 // Valid hints for the abstract operation ToPrimitive,
1133 // implemented according to ES6, section 7.1.1.
1134 enum class ToPrimitiveHint { kDefault, kNumber, kString };
1135 
1136 // Defines specifics about arguments object or rest parameter creation.
1137 enum class CreateArgumentsType : uint8_t {
1138   kMappedArguments,
1139   kUnmappedArguments,
1140   kRestParameter
1141 };
1142 
1143 inline size_t hash_value(CreateArgumentsType type) {
1144   return bit_cast<uint8_t>(type);
1145 }
1146 
1147 inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
1148   switch (type) {
1149     case CreateArgumentsType::kMappedArguments:
1150       return os << "MAPPED_ARGUMENTS";
1151     case CreateArgumentsType::kUnmappedArguments:
1152       return os << "UNMAPPED_ARGUMENTS";
1153     case CreateArgumentsType::kRestParameter:
1154       return os << "REST_PARAMETER";
1155   }
1156   UNREACHABLE();
1157 }
1158 
1159 // Threshold calculated using a microbenchmark.
1160 // https://chromium-review.googlesource.com/c/v8/v8/+/3429210
1161 constexpr int kScopeInfoMaxInlinedLocalNamesSize = 75;
1162 
1163 enum ScopeType : uint8_t {
1164   CLASS_SCOPE,     // The scope introduced by a class.
1165   EVAL_SCOPE,      // The top-level scope for an eval source.
1166   FUNCTION_SCOPE,  // The top-level scope for a function.
1167   MODULE_SCOPE,    // The scope introduced by a module literal
1168   SCRIPT_SCOPE,    // The top-level scope for a script or a top-level eval.
1169   CATCH_SCOPE,     // The scope introduced by catch.
1170   BLOCK_SCOPE,     // The scope introduced by a new block.
1171   WITH_SCOPE       // The scope introduced by with.
1172 };
1173 
1174 inline std::ostream& operator<<(std::ostream& os, ScopeType type) {
1175   switch (type) {
1176     case ScopeType::EVAL_SCOPE:
1177       return os << "EVAL_SCOPE";
1178     case ScopeType::FUNCTION_SCOPE:
1179       return os << "FUNCTION_SCOPE";
1180     case ScopeType::MODULE_SCOPE:
1181       return os << "MODULE_SCOPE";
1182     case ScopeType::SCRIPT_SCOPE:
1183       return os << "SCRIPT_SCOPE";
1184     case ScopeType::CATCH_SCOPE:
1185       return os << "CATCH_SCOPE";
1186     case ScopeType::BLOCK_SCOPE:
1187       return os << "BLOCK_SCOPE";
1188     case ScopeType::CLASS_SCOPE:
1189       return os << "CLASS_SCOPE";
1190     case ScopeType::WITH_SCOPE:
1191       return os << "WITH_SCOPE";
1192   }
1193   UNREACHABLE();
1194 }
1195 
1196 // AllocationSiteMode controls whether allocations are tracked by an allocation
1197 // site.
1198 enum AllocationSiteMode {
1199   DONT_TRACK_ALLOCATION_SITE,
1200   TRACK_ALLOCATION_SITE,
1201   LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
1202 };
1203 
1204 enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
1205 
1206 // The mips architecture prior to revision 5 has inverted encoding for sNaN.
1207 #if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) &&           \
1208      (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
1209     (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) &&         \
1210      (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
1211 constexpr uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
1212 constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFF;
1213 #else
1214 constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
1215 constexpr uint32_t kHoleNanLower32 = 0xFFF7FFFF;
1216 #endif
1217 
1218 constexpr uint64_t kHoleNanInt64 =
1219     (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
1220 
1221 // ES6 section 20.1.2.6 Number.MAX_SAFE_INTEGER
1222 constexpr uint64_t kMaxSafeIntegerUint64 = 9007199254740991;  // 2^53-1
1223 constexpr double kMaxSafeInteger = static_cast<double>(kMaxSafeIntegerUint64);
1224 // ES6 section 20.1.2.8 Number.MIN_SAFE_INTEGER
1225 constexpr double kMinSafeInteger = -kMaxSafeInteger;
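// Sketch of the 2^53 - 1 claim in the comments above.
static_assert(kMaxSafeIntegerUint64 == (uint64_t{1} << 53) - 1,
              "Number.MAX_SAFE_INTEGER is 2^53 - 1");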
1226 
1227 constexpr double kMaxUInt32Double = double{kMaxUInt32};
1228 
1229 // The order of this enum has to be kept in sync with the predicates below.
1230 enum class VariableMode : uint8_t {
1231   // User declared variables:
1232   kLet,  // declared via 'let' declarations (first lexical)
1233 
1234   kConst,  // declared via 'const' declarations (last lexical)
1235 
1236   kVar,  // declared via 'var', and 'function' declarations
1237 
1238   // Variables introduced by the compiler:
1239   kTemporary,  // temporary variables (not user-visible), stack-allocated
1240                // unless the scope as a whole has forced context allocation
1241 
1242   kDynamic,  // always require dynamic lookup (we don't know
1243              // the declaration)
1244 
1245   kDynamicGlobal,  // requires dynamic lookup, but we know that the
1246                    // variable is global unless it has been shadowed
1247                    // by an eval-introduced variable
1248 
1249   kDynamicLocal,  // requires dynamic lookup, but we know that the
1250                   // variable is local and where it is unless it
1251                   // has been shadowed by an eval-introduced
1252                   // variable
1253 
1254   // Variables for private methods or accessors whose access require
1255   // brand check. Declared only in class scopes by the compiler
1256   // and allocated only in class contexts:
1257   kPrivateMethod,  // Does not coexist with any other variable with the same
1258                    // name in the same scope.
1259 
1260   kPrivateSetterOnly,  // Incompatible with variables with the same name but
1261                        // any mode other than kPrivateGetterOnly. Transition to
1262                        // kPrivateGetterAndSetter if a later declaration for the
1263                        // same name with kPrivateGetterOnly is made.
1264 
1265   kPrivateGetterOnly,  // Incompatible with variables with the same name but
1266                        // any mode other than kPrivateSetterOnly. Transition to
1267                        // kPrivateGetterAndSetter if a later declaration for the
1268                        // same name with kPrivateSetterOnly is made.
1269 
1270   kPrivateGetterAndSetter,  // Does not coexist with any other variable with the
1271                             // same name in the same scope.
1272 
1273   kLastLexicalVariableMode = kConst,
1274 };
1275 
1276 // Printing support
1277 #ifdef DEBUG
1278 inline const char* VariableMode2String(VariableMode mode) {
1279   switch (mode) {
1280     case VariableMode::kVar:
1281       return "VAR";
1282     case VariableMode::kLet:
1283       return "LET";
1284     case VariableMode::kPrivateGetterOnly:
1285       return "PRIVATE_GETTER_ONLY";
1286     case VariableMode::kPrivateSetterOnly:
1287       return "PRIVATE_SETTER_ONLY";
1288     case VariableMode::kPrivateMethod:
1289       return "PRIVATE_METHOD";
1290     case VariableMode::kPrivateGetterAndSetter:
1291       return "PRIVATE_GETTER_AND_SETTER";
1292     case VariableMode::kConst:
1293       return "CONST";
1294     case VariableMode::kDynamic:
1295       return "DYNAMIC";
1296     case VariableMode::kDynamicGlobal:
1297       return "DYNAMIC_GLOBAL";
1298     case VariableMode::kDynamicLocal:
1299       return "DYNAMIC_LOCAL";
1300     case VariableMode::kTemporary:
1301       return "TEMPORARY";
1302   }
1303   UNREACHABLE();
1304 }
1305 #endif
1306 
1307 enum VariableKind : uint8_t {
1308   NORMAL_VARIABLE,
1309   PARAMETER_VARIABLE,
1310   THIS_VARIABLE,
1311   SLOPPY_BLOCK_FUNCTION_VARIABLE,
1312   SLOPPY_FUNCTION_NAME_VARIABLE
1313 };
1314 
1315 inline bool IsDynamicVariableMode(VariableMode mode) {
1316   return mode >= VariableMode::kDynamic && mode <= VariableMode::kDynamicLocal;
1317 }
1318 
1319 inline bool IsDeclaredVariableMode(VariableMode mode) {
1320   STATIC_ASSERT(static_cast<uint8_t>(VariableMode::kLet) ==
1321                 0);  // Implies that mode >= VariableMode::kLet.
1322   return mode <= VariableMode::kVar;
1323 }
1324 
1325 inline bool IsPrivateMethodOrAccessorVariableMode(VariableMode mode) {
1326   return mode >= VariableMode::kPrivateMethod &&
1327          mode <= VariableMode::kPrivateGetterAndSetter;
1328 }
1329 
1330 inline bool IsSerializableVariableMode(VariableMode mode) {
1331   return IsDeclaredVariableMode(mode) ||
1332          IsPrivateMethodOrAccessorVariableMode(mode);
1333 }
1334 
1335 inline bool IsConstVariableMode(VariableMode mode) {
1336   return mode == VariableMode::kConst ||
1337          IsPrivateMethodOrAccessorVariableMode(mode);
1338 }
1339 
1340 inline bool IsLexicalVariableMode(VariableMode mode) {
1341   STATIC_ASSERT(static_cast<uint8_t>(VariableMode::kLet) ==
1342                 0);  // Implies that mode >= VariableMode::kLet.
1343   return mode <= VariableMode::kLastLexicalVariableMode;
1344 }
1345 
1346 enum VariableLocation : uint8_t {
1347   // Before and during variable allocation, a variable whose location is
1348   // not yet determined.  After allocation, a variable looked up as a
1349   // property on the global object (and possibly absent).  name() is the
1350   // variable name, index() is invalid.
1351   UNALLOCATED,
1352 
1353   // A slot in the parameter section on the stack.  index() is the
1354   // parameter index, counting left-to-right.  The receiver is index -1;
1355   // the first parameter is index 0.
1356   PARAMETER,
1357 
1358   // A slot in the local section on the stack.  index() is the variable
1359   // index in the stack frame, starting at 0.
1360   LOCAL,
1361 
1362   // An indexed slot in a heap context.  index() is the variable index in
1363   // the context object on the heap, starting at 0.  scope() is the
1364   // corresponding scope.
1365   CONTEXT,
1366 
1367   // A named slot in a heap context.  name() is the variable name in the
1368   // context object on the heap, with lookup starting at the current
1369   // context.  index() is invalid.
1370   LOOKUP,
1371 
1372   // A named slot in a module's export table.
1373   MODULE,
1374 
1375   // An indexed slot in a script context. index() is the variable
1376   // index in the context object on the heap, starting at 0.
1377   // Important: REPL_GLOBAL variables from different scripts with the
1378   //            same name share a single script context slot. Every
1379   //            script context will reserve a slot, but only one will be used.
1380   // REPL_GLOBAL variables are stored in script contexts, but accessed like
1381   // globals, i.e. they always require a lookup at runtime to find the right
1382   // script context.
1383   REPL_GLOBAL,
1384 
1385   kLastVariableLocation = REPL_GLOBAL
1386 };
1387 
1388 // ES6 specifies declarative environment records with mutable and immutable
1389 // bindings that can be in two states: initialized and uninitialized.
1390 // When accessing a binding, it needs to be checked for initialization.
1391 // However in the following cases the binding is initialized immediately
1392 // after creation so the initialization check can always be skipped:
1393 //
1394 // 1. Var declared local variables.
1395 //      var foo;
1396 // 2. A local variable introduced by a function declaration.
1397 //      function foo() {}
1398 // 3. Parameters
1399 //      function x(foo) {}
1400 // 4. Catch bound variables.
1401 //      try {} catch (foo) {}
1402 // 6. Function name variables of named function expressions.
1403 //      var x = function foo() {}
1404 // 7. Implicit binding of 'this'.
1405 // 8. Implicit binding of 'arguments' in functions.
1406 //
1407 // The following enum specifies a flag that indicates if the binding needs a
1408 // distinct initialization step (kNeedsInitialization) or if the binding is
1409 // immediately initialized upon creation (kCreatedInitialized).
1410 enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
1411 
1412 // Static variables can only be used with the class in the closest
1413 // class scope as receivers.
1414 enum class IsStaticFlag : uint8_t { kNotStatic, kStatic };
1415 
1416 enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
1417 
1418 enum class InterpreterPushArgsMode : unsigned {
1419   kArrayFunction,
1420   kWithFinalSpread,
1421   kOther
1422 };
1423 
1424 inline size_t hash_value(InterpreterPushArgsMode mode) {
1425   return bit_cast<unsigned>(mode);
1426 }
1427 
1428 inline std::ostream& operator<<(std::ostream& os,
1429                                 InterpreterPushArgsMode mode) {
1430   switch (mode) {
1431     case InterpreterPushArgsMode::kArrayFunction:
1432       return os << "ArrayFunction";
1433     case InterpreterPushArgsMode::kWithFinalSpread:
1434       return os << "WithFinalSpread";
1435     case InterpreterPushArgsMode::kOther:
1436       return os << "Other";
1437   }
1438   UNREACHABLE();
1439 }

inline uint32_t ObjectHash(Address address) {
  // All objects are at least pointer aligned, so we can remove the trailing
  // zeros.
  return static_cast<uint32_t>(address >> kTaggedSizeLog2);
}
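
// Worked example (values are illustrative; kTaggedSizeLog2 depends on the
// pointer-compression configuration): with kTaggedSizeLog2 == 2, the aligned
// addresses 0x1000 and 0x1004 hash to 0x400 and 0x401, i.e. adjacent objects
// get adjacent hash values instead of sharing the always-zero low bits.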

// Type feedback is encoded in such a way that we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
//
//   kSignedSmall -> kSignedSmallInputs -> kNumber  -> kNumberOrOddball -> kAny
//                                                     kString          -> kAny
//                                                     kBigInt          -> kAny
//
// Technically we wouldn't need the separation between the kNumber and the
// kNumberOrOddball values here, since for binary operations we always
// truncate oddballs to numbers. In practice, though, always handling numbers
// and oddballs everywhere causes TurboFan to generate quite a lot of unused
// code, since in 99% of the use sites only numbers occur.
class BinaryOperationFeedback {
 public:
  enum {
    kNone = 0x0,
    kSignedSmall = 0x1,
    kSignedSmallInputs = 0x3,
    kNumber = 0x7,
    kNumberOrOddball = 0xF,
    kString = 0x10,
    kBigInt = 0x20,
    kAny = 0x7F
  };
};
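
// A minimal sanity sketch (modeled on the ForInFeedback asserts below,
// assuming the constants above): 'OR'-combining feedback only ever moves
// towards the more generic type, never back down the lattice.
STATIC_ASSERT((BinaryOperationFeedback::kSignedSmall |
               BinaryOperationFeedback::kSignedSmallInputs) ==
              BinaryOperationFeedback::kSignedSmallInputs);
STATIC_ASSERT((BinaryOperationFeedback::kSignedSmallInputs |
               BinaryOperationFeedback::kNumber) ==
              BinaryOperationFeedback::kNumber);
STATIC_ASSERT((BinaryOperationFeedback::kNumber |
               BinaryOperationFeedback::kNumberOrOddball) ==
              BinaryOperationFeedback::kNumberOrOddball);
STATIC_ASSERT((BinaryOperationFeedback::kNumberOrOddball |
               BinaryOperationFeedback::kAny) == BinaryOperationFeedback::kAny);
STATIC_ASSERT((BinaryOperationFeedback::kString |
               BinaryOperationFeedback::kAny) == BinaryOperationFeedback::kAny);
STATIC_ASSERT((BinaryOperationFeedback::kBigInt |
               BinaryOperationFeedback::kAny) == BinaryOperationFeedback::kAny);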

// Type feedback is encoded in such a way that we can combine the feedback
// at different points by performing an 'OR' operation.
// This is deliberately distinct from BinaryOperationFeedback, because both
// the feedback that matters and the way it is consumed differ greatly.
class CompareOperationFeedback {
  enum {
    kSignedSmallFlag = 1 << 0,
    kOtherNumberFlag = 1 << 1,
    kBooleanFlag = 1 << 2,
    kNullOrUndefinedFlag = 1 << 3,
    kInternalizedStringFlag = 1 << 4,
    kOtherStringFlag = 1 << 5,
    kSymbolFlag = 1 << 6,
    kBigIntFlag = 1 << 7,
    kReceiverFlag = 1 << 8,
    kAnyMask = 0x1FF,
  };

 public:
  enum Type {
    kNone = 0,

    kBoolean = kBooleanFlag,
    kNullOrUndefined = kNullOrUndefinedFlag,
    kOddball = kBoolean | kNullOrUndefined,

    kSignedSmall = kSignedSmallFlag,
    kNumber = kSignedSmall | kOtherNumberFlag,
    kNumberOrBoolean = kNumber | kBoolean,
    kNumberOrOddball = kNumber | kOddball,

    kInternalizedString = kInternalizedStringFlag,
    kString = kInternalizedString | kOtherStringFlag,

    kReceiver = kReceiverFlag,
    kReceiverOrNullOrUndefined = kReceiver | kNullOrUndefined,

    kBigInt = kBigIntFlag,
    kSymbol = kSymbolFlag,

    kAny = kAnyMask,
  };
};
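
// A minimal sanity sketch (same style as above, assuming the public Type
// constants): compare feedback also combines monotonically under 'OR'.
STATIC_ASSERT((CompareOperationFeedback::kSignedSmall |
               CompareOperationFeedback::kNumber) ==
              CompareOperationFeedback::kNumber);
STATIC_ASSERT((CompareOperationFeedback::kBoolean |
               CompareOperationFeedback::kNullOrUndefined) ==
              CompareOperationFeedback::kOddball);
STATIC_ASSERT((CompareOperationFeedback::kInternalizedString |
               CompareOperationFeedback::kString) ==
              CompareOperationFeedback::kString);
STATIC_ASSERT((CompareOperationFeedback::kNumber |
               CompareOperationFeedback::kAny) ==
              CompareOperationFeedback::kAny);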

// Type feedback is encoded in such a way that we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
// kNone -> kEnumCacheKeysAndIndices -> kEnumCacheKeys -> kAny
enum class ForInFeedback : uint8_t {
  kNone = 0x0,
  kEnumCacheKeysAndIndices = 0x1,
  kEnumCacheKeys = 0x3,
  kAny = 0x7
};
STATIC_ASSERT((static_cast<int>(ForInFeedback::kNone) |
               static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices)) ==
              static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices));
STATIC_ASSERT((static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices) |
               static_cast<int>(ForInFeedback::kEnumCacheKeys)) ==
              static_cast<int>(ForInFeedback::kEnumCacheKeys));
STATIC_ASSERT((static_cast<int>(ForInFeedback::kEnumCacheKeys) |
               static_cast<int>(ForInFeedback::kAny)) ==
              static_cast<int>(ForInFeedback::kAny));

enum class UnicodeEncoding : uint8_t {
  // Different Unicode encodings in a |word32|:
  UTF16,  // hi 16 bits -> trailing surrogate or 0, low 16 bits -> lead surrogate
  UTF32,  // full UTF-32 code unit / Unicode code point
};
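
// Worked example (illustrative): U+1F600 encodes as the surrogate pair
// 0xD83D 0xDE00, so as a UTF16 |word32| it is 0xDE00D83D (trailing surrogate
// in the high 16 bits, lead surrogate in the low 16 bits), while as UTF32 it
// is simply 0x0001F600. A BMP character such as U+0041 ('A') has no trailing
// surrogate, so its UTF16 |word32| is 0x00000041.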

inline size_t hash_value(UnicodeEncoding encoding) {
  return static_cast<uint8_t>(encoding);
}

inline std::ostream& operator<<(std::ostream& os, UnicodeEncoding encoding) {
  switch (encoding) {
    case UnicodeEncoding::UTF16:
      return os << "UTF16";
    case UnicodeEncoding::UTF32:
      return os << "UTF32";
  }
  UNREACHABLE();
}

enum class IterationKind { kKeys, kValues, kEntries };

inline std::ostream& operator<<(std::ostream& os, IterationKind kind) {
  switch (kind) {
    case IterationKind::kKeys:
      return os << "IterationKind::kKeys";
    case IterationKind::kValues:
      return os << "IterationKind::kValues";
    case IterationKind::kEntries:
      return os << "IterationKind::kEntries";
  }
  UNREACHABLE();
}

enum class CollectionKind { kMap, kSet };

inline std::ostream& operator<<(std::ostream& os, CollectionKind kind) {
  switch (kind) {
    case CollectionKind::kMap:
      return os << "CollectionKind::kMap";
    case CollectionKind::kSet:
      return os << "CollectionKind::kSet";
  }
  UNREACHABLE();
}

// Flags for the runtime function kDefineKeyedOwnPropertyInLiteral. A property
// can be enumerable or not, and, in the case of functions, the function name
// can be set or not.
enum class DefineKeyedOwnPropertyInLiteralFlag {
  kNoFlags = 0,
  kDontEnum = 1 << 0,
  kSetFunctionName = 1 << 1
};
using DefineKeyedOwnPropertyInLiteralFlags =
    base::Flags<DefineKeyedOwnPropertyInLiteralFlag>;
DEFINE_OPERATORS_FOR_FLAGS(DefineKeyedOwnPropertyInLiteralFlags)
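
// Usage sketch (illustrative): the generated flag operators allow combining
// individual flags into a DefineKeyedOwnPropertyInLiteralFlags value, e.g.
//
//   DefineKeyedOwnPropertyInLiteralFlags flags =
//       DefineKeyedOwnPropertyInLiteralFlag::kDontEnum |
//       DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName;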

enum ExternalArrayType {
  kExternalInt8Array = 1,
  kExternalUint8Array,
  kExternalInt16Array,
  kExternalUint16Array,
  kExternalInt32Array,
  kExternalUint32Array,
  kExternalFloat32Array,
  kExternalFloat64Array,
  kExternalUint8ClampedArray,
  kExternalBigInt64Array,
  kExternalBigUint64Array,
};

struct AssemblerDebugInfo {
  AssemblerDebugInfo(const char* name, const char* file, int line)
      : name(name), file(file), line(line) {}
  const char* name;
  const char* file;
  int line;
};

inline std::ostream& operator<<(std::ostream& os,
                                const AssemblerDebugInfo& info) {
  os << "(" << info.name << ":" << info.file << ":" << info.line << ")";
  return os;
}

using FileAndLine = std::pair<const char*, int>;

#define TIERING_STATE_LIST(V)           \
  V(None, 0b000)                        \
  V(InProgress, 0b001)                  \
  V(RequestMaglev_Synchronous, 0b010)   \
  V(RequestMaglev_Concurrent, 0b011)    \
  V(RequestTurbofan_Synchronous, 0b100) \
  V(RequestTurbofan_Concurrent, 0b101)

enum class TieringState : int32_t {
#define V(Name, Value) k##Name = Value,
  TIERING_STATE_LIST(V)
#undef V
      kLastTieringState = kRequestTurbofan_Concurrent,
};

// To efficiently check whether a marker is kNone or kInProgress using a single
// mask, we expect kNone to be 0 and kInProgress to be 1 so that we can mask
// off the least significant bit when checking.
STATIC_ASSERT(static_cast<int>(TieringState::kNone) == 0b00 &&
              static_cast<int>(TieringState::kInProgress) == 0b01);
STATIC_ASSERT(static_cast<int>(TieringState::kLastTieringState) <= 0b111);
static constexpr uint32_t kNoneOrInProgressMask = 0b110;
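
// Illustrative check of the mask trick (assuming the encoding above): states
// whose value masked with kNoneOrInProgressMask is zero are exactly kNone and
// kInProgress; any tier-up request has a nonzero result.
STATIC_ASSERT((static_cast<uint32_t>(TieringState::kNone) &
               kNoneOrInProgressMask) == 0);
STATIC_ASSERT((static_cast<uint32_t>(TieringState::kInProgress) &
               kNoneOrInProgressMask) == 0);
STATIC_ASSERT((static_cast<uint32_t>(TieringState::kRequestMaglev_Concurrent) &
               kNoneOrInProgressMask) != 0);
STATIC_ASSERT(
    (static_cast<uint32_t>(TieringState::kRequestTurbofan_Concurrent) &
     kNoneOrInProgressMask) != 0);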

#define V(Name, Value)                          \
  constexpr bool Is##Name(TieringState state) { \
    return state == TieringState::k##Name;      \
  }
TIERING_STATE_LIST(V)
#undef V
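
// The macro block above expands to one predicate per state, e.g. (given the
// list above): IsNone(state), IsInProgress(state),
// IsRequestMaglev_Synchronous(state), ..., IsRequestTurbofan_Concurrent(state).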

constexpr const char* ToString(TieringState marker) {
  switch (marker) {
#define V(Name, Value)        \
  case TieringState::k##Name: \
    return "TieringState::k" #Name;
    TIERING_STATE_LIST(V)
#undef V
  }
}

inline std::ostream& operator<<(std::ostream& os, TieringState marker) {
  return os << ToString(marker);
}

#undef TIERING_STATE_LIST

enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
enum class CallFeedbackContent { kTarget, kReceiver };

inline std::ostream& operator<<(std::ostream& os,
                                SpeculationMode speculation_mode) {
  switch (speculation_mode) {
    case SpeculationMode::kAllowSpeculation:
      return os << "SpeculationMode::kAllowSpeculation";
    case SpeculationMode::kDisallowSpeculation:
      return os << "SpeculationMode::kDisallowSpeculation";
  }
}

enum class BlockingBehavior { kBlock, kDontBlock };

enum class ConcurrencyMode : uint8_t { kSynchronous, kConcurrent };

constexpr bool IsSynchronous(ConcurrencyMode mode) {
  return mode == ConcurrencyMode::kSynchronous;
}
constexpr bool IsConcurrent(ConcurrencyMode mode) {
  return mode == ConcurrencyMode::kConcurrent;
}

constexpr const char* ToString(ConcurrencyMode mode) {
  switch (mode) {
    case ConcurrencyMode::kSynchronous:
      return "ConcurrencyMode::kSynchronous";
    case ConcurrencyMode::kConcurrent:
      return "ConcurrencyMode::kConcurrent";
  }
}
inline std::ostream& operator<<(std::ostream& os, ConcurrencyMode mode) {
  return os << ToString(mode);
}

// An architecture-independent representation of the sets of registers
// available for instruction creation.
enum class AliasingKind {
  // Registers alias a single register of every other size (e.g. Intel).
  kOverlap,
  // Registers alias two registers of the next smaller size (e.g. ARM).
  kCombine,
  // SIMD128 registers are independent of every other size (e.g. RISC-V).
  kIndependent
};

#define FOR_EACH_ISOLATE_ADDRESS_NAME(C)                            \
  C(Handler, handler)                                               \
  C(CEntryFP, c_entry_fp)                                           \
  C(CFunction, c_function)                                          \
  C(Context, context)                                               \
  C(PendingException, pending_exception)                            \
  C(PendingHandlerContext, pending_handler_context)                 \
  C(PendingHandlerEntrypoint, pending_handler_entrypoint)           \
  C(PendingHandlerConstantPool, pending_handler_constant_pool)      \
  C(PendingHandlerFP, pending_handler_fp)                           \
  C(PendingHandlerSP, pending_handler_sp)                           \
  C(NumFramesAbovePendingHandler, num_frames_above_pending_handler) \
  C(ExternalCaughtException, external_caught_exception)             \
  C(JSEntrySP, js_entry_sp)

enum IsolateAddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
  FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
#undef DECLARE_ENUM
      kIsolateAddressCount
};
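
// The DECLARE_ENUM expansion above yields one enumerator per list entry, e.g.
// kHandlerAddress, kCEntryFPAddress, ..., kJSEntrySPAddress, followed by
// kIsolateAddressCount.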

// The reason for a WebAssembly trap.
#define FOREACH_WASM_TRAPREASON(V) \
  V(TrapUnreachable)               \
  V(TrapMemOutOfBounds)            \
  V(TrapUnalignedAccess)           \
  V(TrapDivByZero)                 \
  V(TrapDivUnrepresentable)        \
  V(TrapRemByZero)                 \
  V(TrapFloatUnrepresentable)      \
  V(TrapFuncSigMismatch)           \
  V(TrapDataSegmentOutOfBounds)    \
  V(TrapElemSegmentDropped)        \
  V(TrapTableOutOfBounds)          \
  V(TrapRethrowNull)               \
  V(TrapNullDereference)           \
  V(TrapIllegalCast)               \
  V(TrapArrayOutOfBounds)          \
  V(TrapArrayTooLarge)

enum KeyedAccessLoadMode {
  STANDARD_LOAD,
  LOAD_IGNORE_OUT_OF_BOUNDS,
};

enum KeyedAccessStoreMode {
  STANDARD_STORE,
  STORE_AND_GROW_HANDLE_COW,
  STORE_IGNORE_OUT_OF_BOUNDS,
  STORE_HANDLE_COW
};

enum MutableMode { MUTABLE, IMMUTABLE };

inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
  return store_mode == STORE_HANDLE_COW ||
         store_mode == STORE_AND_GROW_HANDLE_COW;
}

inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
  return store_mode == STORE_AND_GROW_HANDLE_COW;
}

enum class IcCheckType { kElement, kProperty };

// Helper stubs can be called in different ways depending on where the target
// code is located and what the call sequence is expected to look like:
//  - CodeObject: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
//  - WasmRuntimeStub: Call native {WasmCode} stub via
//    {RelocInfo::WASM_STUB_CALL}.
//  - BuiltinPointer: Call a builtin based on a builtin pointer with dynamic
//    contents. If builtins are embedded, we call directly into off-heap code
//    without going through the on-heap Code trampoline.
enum class StubCallMode {
  kCallCodeObject,
#if V8_ENABLE_WEBASSEMBLY
  kCallWasmRuntimeStub,
#endif  // V8_ENABLE_WEBASSEMBLY
  kCallBuiltinPointer,
};

constexpr int kFunctionLiteralIdInvalid = -1;
constexpr int kFunctionLiteralIdTopLevel = 0;

constexpr int kSwissNameDictionaryInitialCapacity = 4;

constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;

constexpr int kJSArgcReceiverSlots = 1;
constexpr uint16_t kDontAdaptArgumentsSentinel = 0;

// Helper to get the parameter count for functions with JS linkage.
inline constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}
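
// Illustrative check (using the constants above): a function declared with
// two parameters has a JS-linkage parameter count of three, since the
// receiver occupies one implicit slot.
STATIC_ASSERT(JSParameterCount(2) == 2 + kJSArgcReceiverSlots);
STATIC_ASSERT(JSParameterCount(0) == kJSArgcReceiverSlots);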

// Opaque data type for identifying stack frames. Used extensively
// by the debugger.
// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that the enumeration
// type has the correct value range (see Issue 830 for more details).
enum StackFrameId { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };

enum class ExceptionStatus : bool { kException = false, kSuccess = true };
V8_INLINE bool operator!(ExceptionStatus status) {
  return !static_cast<bool>(status);
}

enum class TraceRetainingPathMode { kEnabled, kDisabled };

// Used in the ScopeInfo flags fields for the function name variable for named
// function expressions, and for the receiver. Must be declared here so that it
// can be used in Torque.
enum class VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };

#ifdef V8_COMPRESS_POINTERS
class PtrComprCageBase {
 public:
  explicit constexpr PtrComprCageBase(Address address) : address_(address) {}
  // NOLINTNEXTLINE
  inline PtrComprCageBase(const Isolate* isolate);
  // NOLINTNEXTLINE
  inline PtrComprCageBase(const LocalIsolate* isolate);

  inline Address address() const;

  bool operator==(const PtrComprCageBase& other) const {
    return address_ == other.address_;
  }

 private:
  Address address_;
};
#else
class PtrComprCageBase {
 public:
  PtrComprCageBase() = default;
  // NOLINTNEXTLINE
  PtrComprCageBase(const Isolate* isolate) {}
  // NOLINTNEXTLINE
  PtrComprCageBase(const LocalIsolate* isolate) {}
};
#endif

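// Wrapper for an int32_t that is expected to fit into 31 bits: the DCHECKs
// below require bit 31 to equal bit 30, i.e. values in [-2^30, 2^30 - 1].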
class int31_t {
 public:
  constexpr int31_t() : value_(0) {}
  constexpr int31_t(int value) : value_(value) {  // NOLINT(runtime/explicit)
    DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
  }
  int31_t& operator=(int value) {
    DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
    value_ = value;
    return *this;
  }
  int32_t value() const { return value_; }
  operator int32_t() const { return value_; }

 private:
  int32_t value_;
};

enum PropertiesEnumerationMode {
  // String and then Symbol properties according to the spec
  // ES#sec-object.assign
  kEnumerationOrder,
  // Order of property addition
  kPropertyAdditionOrder,
};

enum class StringTransitionStrategy {
  // The string must be transitioned to a new representation by first copying.
  kCopy,
  // The string can be transitioned in-place by changing its map.
  kInPlace,
  // The string is already transitioned to the desired representation.
  kAlreadyTransitioned
};

}  // namespace internal

// Tag dispatching support for atomic loads and stores.
struct AcquireLoadTag {};
struct RelaxedLoadTag {};
struct ReleaseStoreTag {};
struct RelaxedStoreTag {};
struct SeqCstAccessTag {};
static constexpr AcquireLoadTag kAcquireLoad;
static constexpr RelaxedLoadTag kRelaxedLoad;
static constexpr ReleaseStoreTag kReleaseStore;
static constexpr RelaxedStoreTag kRelaxedStore;
static constexpr SeqCstAccessTag kSeqCstAccess;

}  // namespace v8

namespace i = v8::internal;

#endif  // V8_COMMON_GLOBALS_H_