// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDE_V8_INTERNAL_H_
#define INCLUDE_V8_INTERNAL_H_

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <type_traits>

#include "v8-version.h"  // NOLINT(build/include_directory)
#include "v8config.h"    // NOLINT(build/include_directory)

namespace v8 {

class Array;
class Context;
class Data;
class Isolate;
template <typename T>
class Local;

namespace internal {

class Isolate;

typedef uintptr_t Address;
static const Address kNullAddress = 0;

constexpr int KB = 1024;
constexpr int MB = KB * 1024;
constexpr int GB = MB * 1024;
#ifdef V8_TARGET_ARCH_X64
constexpr size_t TB = size_t{GB} * 1024;
#endif

/**
 * Configuration of tagging scheme.
 */
const int kApiSystemPointerSize = sizeof(void*);
const int kApiDoubleSize = sizeof(double);
const int kApiInt32Size = sizeof(int32_t);
const int kApiInt64Size = sizeof(int64_t);
const int kApiSizetSize = sizeof(size_t);

// Tag information for HeapObject.
const int kHeapObjectTag = 1;
const int kWeakHeapObjectTag = 3;
const int kHeapObjectTagSize = 2;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;

// Tag information for forwarding pointers stored in object headers.
// 0b00 in the lowest 2 bits of the header indicates that the map word is a
// forwarding pointer.
const int kForwardingTag = 0;
const int kForwardingTagSize = 2;
const intptr_t kForwardingTagMask = (1 << kForwardingTagSize) - 1;

// Tag information for Smi.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

template <size_t tagged_ptr_size>
struct SmiTagging;

constexpr intptr_t kIntptrAllBitsSet = intptr_t{-1};
constexpr uintptr_t kUintptrAllBitsSet =
    static_cast<uintptr_t>(kIntptrAllBitsSet);

// Smi constants for systems where the tagged pointer is a 32-bit value.
template <>
struct SmiTagging<4> {
  enum { kSmiShiftSize = 0, kSmiValueSize = 31 };

  static constexpr intptr_t kSmiMinValue =
      static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
  static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);

  V8_INLINE static int SmiToInt(const internal::Address value) {
    int shift_bits = kSmiTagSize + kSmiShiftSize;
    // Truncate and shift down (requires >> to be sign extending).
    return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
  }
  V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
    // Is value in range [kSmiMinValue, kSmiMaxValue]?
    // Use unsigned operations in order to avoid undefined behaviour in case
    // of signed integer overflow.
    return (static_cast<uintptr_t>(value) -
            static_cast<uintptr_t>(kSmiMinValue)) <=
           (static_cast<uintptr_t>(kSmiMaxValue) -
            static_cast<uintptr_t>(kSmiMinValue));
  }
};
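
// Illustrative sketch (not part of the V8 API): encoding and decoding the
// value 42 under SmiTagging<4>, where kSmiTagSize + kSmiShiftSize == 1:
//
//   Address smi = (static_cast<Address>(42) << 1) | kSmiTag;  // == 84
//   int decoded = SmiTagging<4>::SmiToInt(smi);               // == 42
//
// The low bit stays 0 (kSmiTag), which distinguishes Smis from tagged
// HeapObject pointers, whose low bit is 1 (kHeapObjectTag).
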
// Smi constants for systems where the tagged pointer is a 64-bit value.
template <>
struct SmiTagging<8> {
  enum { kSmiShiftSize = 31, kSmiValueSize = 32 };

  static constexpr intptr_t kSmiMinValue =
      static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
  static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);

  V8_INLINE static int SmiToInt(const internal::Address value) {
    int shift_bits = kSmiTagSize + kSmiShiftSize;
    // Shift down and throw away the top 32 bits.
    return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
  }
  V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
    // To be representable as a long Smi, the value must be a 32-bit integer.
    return (value == static_cast<int32_t>(value));
  }
};

#ifdef V8_COMPRESS_POINTERS
// See v8:7703 or src/common/ptr-compr-inl.h for details about pointer
// compression.
constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;

static_assert(
    kApiSystemPointerSize == kApiInt64Size,
    "Pointer compression can be enabled only for 64-bit architectures");
const int kApiTaggedSize = kApiInt32Size;
#else
const int kApiTaggedSize = kApiSystemPointerSize;
#endif

constexpr bool PointerCompressionIsEnabled() {
  return kApiTaggedSize != kApiSystemPointerSize;
}

#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
#else
using PlatformSmiTagging = SmiTagging<kApiTaggedSize>;
#endif

// TODO(ishell): Consider adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize
// since it's used much more often than the individual constants.
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }

V8_INLINE static constexpr internal::Address IntToSmi(int value) {
  return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
         kSmiTag;
}
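
// Illustrative sketch (assuming PlatformSmiTagging is SmiTagging<8>, i.e.
// neither 31-bit Smis nor pointer compression are configured): the 32-bit
// payload lives in the upper half of the word, since
// kSmiTagSize + kSmiShiftSize == 32:
//
//   Address smi = IntToSmi(42);                  // == 42ULL << 32
//   int decoded = SmiTagging<8>::SmiToInt(smi);  // arithmetic shift: 42
//
// Negative values survive the round trip because SmiToInt shifts a signed
// intptr_t.
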
/*
 * Sandbox related types, constants, and functions.
 */
constexpr bool SandboxIsEnabled() {
#ifdef V8_SANDBOX
  return true;
#else
  return false;
#endif
}

constexpr bool SandboxedExternalPointersAreEnabled() {
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
  return true;
#else
  return false;
#endif
}

// SandboxedPointers are guaranteed to point into the sandbox. This is
// achieved, for example, by storing them as offsets rather than as raw
// pointers.
using SandboxedPointer_t = Address;

// ExternalPointers point to objects located outside the sandbox. When
// sandboxed external pointers are enabled, these are stored in an external
// pointer table and referenced from HeapObjects through indices.
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
using ExternalPointer_t = uint32_t;
#else
using ExternalPointer_t = Address;
#endif

#ifdef V8_SANDBOX_IS_AVAILABLE

// Size of the sandbox, excluding the guard regions surrounding it.
constexpr size_t kSandboxSizeLog2 = 40;  // 1 TB
constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;

// Required alignment of the sandbox. For simplicity, we require the size of
// the guard regions to be a multiple of this, so that this specifies the
// alignment of the sandbox both including and excluding the surrounding
// guard regions. The alignment requirement is due to the pointer compression
// cage being located at the start of the sandbox.
constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment;

// Sandboxed pointers are stored inside the heap as offsets from the sandbox
// base, shifted to the left. This way, it is guaranteed that the offset is
// smaller than the sandbox size after shifting it to the right again. This
// constant specifies the shift amount.
constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2;
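
// Illustrative sketch of the encode/decode round trip described above
// (sandbox_base and raw_ptr are hypothetical names, not V8 API):
//
//   uint64_t offset = raw_ptr - sandbox_base;  // < 2^40 == kSandboxSize
//   SandboxedPointer_t stored = offset << kSandboxedPointerShift;
//   // On load, the unsigned right shift leaves only 40 significant bits,
//   // so the result is always < kSandboxSize, whatever was stored:
//   Address decoded = sandbox_base + (stored >> kSandboxedPointerShift);
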
// Size of the guard regions surrounding the sandbox. This assumes a
// worst-case scenario of a 32-bit unsigned index used to access an array of
// 64-bit values.
constexpr size_t kSandboxGuardRegionSize = 32ULL * GB;

static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
              "The size of the guard regions around the sandbox must be a "
              "multiple of its required alignment.");

// Minimum size of the sandbox, excluding the guard regions surrounding it.
// If the virtual memory reservation for the sandbox fails, its size is
// currently halved until either the reservation succeeds or the minimum size
// is reached. A minimum of 32GB allows the 4GB pointer compression region as
// well as the ArrayBuffer partition and two 10GB Wasm memory cages to fit
// into the sandbox. 32GB should also be the minimum possible size of the
// userspace address space as there are some machine configurations with only
// 36 virtual address bits.
constexpr size_t kSandboxMinimumSize = 32ULL * GB;

static_assert(kSandboxMinimumSize <= kSandboxSize,
              "The minimal size of the sandbox must be smaller than or equal "
              "to the regular size.");

// On OSes where reserving virtual memory is too expensive to reserve the
// entire address space backing the sandbox, notably Windows pre 8.1, we
// create a partially reserved sandbox that doesn't actually reserve most of
// the memory. Such a sandbox doesn't have the desired security properties,
// as unrelated memory allocations could end up inside of it, but it still
// ensures that objects that should be located inside the sandbox are
// allocated within kSandboxSize bytes from the start of the sandbox. The
// minimum size of the region that is actually reserved for such a sandbox is
// specified by this constant; it should be big enough to contain the pointer
// compression cage as well as the ArrayBuffer partition.
constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;

static_assert(kSandboxMinimumSize > kPtrComprCageReservationSize,
              "The sandbox must be larger than the pointer compression cage "
              "contained within it.");
static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize,
              "The minimum reservation size for a sandbox must be larger "
              "than the pointer compression cage contained within it.");

// For now, even if the sandbox is enabled, we still allow backing stores to
// be allocated outside of it as a fallback. This will simplify the initial
// rollout. However, if sandboxed pointers are also enabled, we must always
// place backing stores inside the sandbox, as they will be referenced
// through them.
#ifdef V8_SANDBOXED_POINTERS
constexpr bool kAllowBackingStoresOutsideSandbox = false;
#else
constexpr bool kAllowBackingStoresOutsideSandbox = true;
#endif  // V8_SANDBOXED_POINTERS

// The size of the virtual memory reservation for an external pointer table.
// This determines the maximum number of entries in a table. Using a maximum
// size allows omitting bounds checks on table accesses if the indices are
// guaranteed (e.g. through shifting) to be below the maximum index. This
// value must be a power of two.
static const size_t kExternalPointerTableReservationSize = 128 * MB;

// The maximum number of entries in an external pointer table.
static const size_t kMaxSandboxedExternalPointers =
    kExternalPointerTableReservationSize / kApiSystemPointerSize;

// The external pointer table indices stored in HeapObjects as external
// pointers are shifted to the left by this amount to guarantee that they are
// smaller than the maximum table size.
static const uint32_t kExternalPointerIndexShift = 8;
static_assert((1 << (32 - kExternalPointerIndexShift)) ==
                  kMaxSandboxedExternalPointers,
              "kExternalPointerTableReservationSize and "
              "kExternalPointerIndexShift don't match");

#endif  // V8_SANDBOX_IS_AVAILABLE

// If sandboxed external pointers are enabled, these tag values will be ORed
// with the external pointers in the external pointer table to prevent use of
// pointers of the wrong type. When a pointer is loaded, it is ANDed with the
// inverse of the expected type's tag. The tags are constructed in a way that
// guarantees that a failed type check will result in one or more of the top
// bits of the pointer being set, rendering the pointer inaccessible. Besides
// the type tag bits (48 through 62), the tags also have the GC mark bit (63)
// set, so that the mark bit is automatically set when a pointer is written
// into the external pointer table (in which case it is clearly alive) and is
// cleared when the pointer is loaded. The exception to this is the free
// entry tag, which doesn't have the mark bit set, as the entry is not alive.
// This construction allows performing the type check and removing GC marking
// bits (the MSB) from the pointer at the same time.
// Note: this scheme assumes a 48-bit address space and will likely break if
// more virtual address bits are used.
constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
constexpr uint64_t kExternalPointerTagShift = 48;
#define MAKE_TAG(v) (static_cast<uint64_t>(v) << kExternalPointerTagShift)
// clang-format off
enum ExternalPointerTag : uint64_t {
  kExternalPointerNullTag = MAKE_TAG(0b0000000000000000),
  kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111110000000),
  kExternalStringResourceTag = MAKE_TAG(0b1000000011111111),
  kExternalStringResourceDataTag = MAKE_TAG(0b1000000101111111),
  kForeignForeignAddressTag = MAKE_TAG(0b1000000110111111),
  kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111),
  kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000000111101111),
  kCodeEntryPointTag = MAKE_TAG(0b1000000111110111),
  kExternalObjectValueTag = MAKE_TAG(0b1000000111111011),
};
// clang-format on
#undef MAKE_TAG
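
// Illustrative sketch of the store/load paths the comment above describes
// (entry and pointer are hypothetical locals, not V8 API):
//
//   uint64_t entry = pointer | tag;  // store: sets type bits + mark bit
//   uint64_t ok =
//       entry & ~static_cast<uint64_t>(kExternalStringResourceTag);
//   // With the matching tag, all tag bits (including the mark bit) are
//   // cleared, leaving a canonical 48-bit pointer. With a mismatched tag,
//   // at least one of the top 16 bits survives, so the pointer faults on
//   // dereference.
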
// Converts an encoded external pointer to an address.
V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
                                            ExternalPointer_t pointer,
                                            ExternalPointerTag tag);

// {obj} must be the raw tagged pointer representation of a HeapObject
// that's guaranteed to never be in ReadOnlySpace.
V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);

// Returns whether we need to throw when an error occurs. This infers the
// language mode based on the current context and the closure. It returns
// true if the language mode is strict.
V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);

V8_EXPORT bool CanHaveInternalField(int instance_type);

/**
 * This class exports constants and functionality from within v8 that
 * is necessary to implement inline functions in the v8 api. Don't
 * depend on functions and constants defined here.
 */
class Internals {
#ifdef V8_MAP_PACKING
  V8_INLINE static constexpr internal::Address UnpackMapWord(
      internal::Address mapword) {
    // TODO(wenyuzhao): Clear header metadata.
    return mapword ^ kMapWordXorMask;
  }
#endif

 public:
  // These values match non-compiler-dependent values defined within
  // the implementation of v8.
  static const int kHeapObjectMapOffset = 0;
  static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiInt32Size;
  static const int kStringResourceOffset =
      1 * kApiTaggedSize + 2 * kApiInt32Size;

  static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
  static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
  static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
  static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
  static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
  static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize;
#endif
  static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
  static const int kStringRepresentationAndEncodingMask = 0x0f;
  static const int kStringEncodingMask = 0x8;
  static const int kExternalTwoByteRepresentationTag = 0x02;
  static const int kExternalOneByteRepresentationTag = 0x0a;
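
  // Hedged reading of the string-type masks above, derived only from the
  // constants themselves: kExternalOneByteRepresentationTag (0x0a) equals
  // kExternalTwoByteRepresentationTag (0x02) | kStringEncodingMask (0x8),
  // which suggests the 0x8 bit marks one-byte encoding. A representation
  // check therefore looks like:
  //
  //   bool is_external_two_byte =
  //       (instance_type & kStringRepresentationAndEncodingMask) ==
  //       kExternalTwoByteRepresentationTag;
  //
  // This is exactly what IsExternalTwoByteString() below does.
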
  static const uint32_t kNumIsolateDataSlots = 4;
  static const int kStackGuardSize = 7 * kApiSystemPointerSize;
  static const int kBuiltinTier0EntryTableSize = 10 * kApiSystemPointerSize;
  static const int kBuiltinTier0TableSize = 10 * kApiSystemPointerSize;

  // IsolateData layout guarantees.
  static const int kIsolateCageBaseOffset = 0;
  static const int kIsolateStackGuardOffset =
      kIsolateCageBaseOffset + kApiSystemPointerSize;
  static const int kBuiltinTier0EntryTableOffset =
      kIsolateStackGuardOffset + kStackGuardSize;
  static const int kBuiltinTier0TableOffset =
      kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;
  static const int kIsolateEmbedderDataOffset =
      kBuiltinTier0TableOffset + kBuiltinTier0TableSize;
  static const int kIsolateFastCCallCallerFpOffset =
      kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
  static const int kIsolateFastCCallCallerPcOffset =
      kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
  static const int kIsolateFastApiCallTargetOffset =
      kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
  static const int kIsolateLongTaskStatsCounterOffset =
      kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
  static const int kIsolateRootsOffset =
      kIsolateLongTaskStatsCounterOffset + kApiSizetSize;

  static const int kExternalPointerTableBufferOffset = 0;
  static const int kExternalPointerTableCapacityOffset =
      kExternalPointerTableBufferOffset + kApiSystemPointerSize;
  static const int kExternalPointerTableFreelistHeadOffset =
      kExternalPointerTableCapacityOffset + kApiInt32Size;

  static const int kUndefinedValueRootIndex = 4;
  static const int kTheHoleValueRootIndex = 5;
  static const int kNullValueRootIndex = 6;
  static const int kTrueValueRootIndex = 7;
  static const int kFalseValueRootIndex = 8;
  static const int kEmptyStringRootIndex = 9;

  static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
  static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
  static const int kNodeStateMask = 0x7;
  static const int kNodeStateIsWeakValue = 2;
  static const int kNodeStateIsPendingValue = 3;

  static const int kFirstNonstringType = 0x80;
  static const int kOddballType = 0x83;
  static const int kForeignType = 0xcc;
  static const int kJSSpecialApiObjectType = 0x410;
  static const int kJSObjectType = 0x421;
  static const int kFirstJSApiObjectType = 0x422;
  static const int kLastJSApiObjectType = 0x80A;

  static const int kUndefinedOddballKind = 5;
  static const int kNullOddballKind = 3;

  // Constants used by PropertyCallbackInfo to check whether we should throw
  // when an error occurs.
  static const int kThrowOnError = 0;
  static const int kDontThrow = 1;
  static const int kInferShouldThrowMode = 2;

  // Soft limit for AdjustAmountOfExternalAllocatedMemory. Trigger an
  // incremental GC once the external memory reaches this limit.
  static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
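
  // Hedged usage sketch (embedder side, via the public API
  // v8::Isolate::AdjustAmountOfExternalAllocatedMemory; `payload` and `size`
  // are hypothetical):
  //
  //   void* payload = malloc(size);
  //   isolate->AdjustAmountOfExternalAllocatedMemory(size);
  //   ...
  //   isolate->AdjustAmountOfExternalAllocatedMemory(
  //       -static_cast<int64_t>(size));
  //   free(payload);
  //
  // Once the reported total crosses kExternalAllocationSoftLimit, V8 may
  // start an incremental GC.
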
#ifdef V8_MAP_PACKING
  static const uintptr_t kMapWordMetadataMask = 0xffffULL << 48;
  // The lowest two bits of mapwords are always `0b10`.
  static const uintptr_t kMapWordSignature = 0b10;
  // XORing a (non-compressed) map with this mask ensures that the two
  // low-order bits are 0b10. The 0 at the end makes this look like a Smi,
  // although real Smis have all lower 32 bits unset. We only rely on these
  // values passing as Smis in very few places.
  static const int kMapWordXorMask = 0b11;
#endif

  V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
  V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
    CheckInitializedImpl(isolate);
#endif
  }

  V8_INLINE static bool HasHeapObjectTag(const internal::Address value) {
    return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
  }

  V8_INLINE static int SmiValue(const internal::Address value) {
    return PlatformSmiTagging::SmiToInt(value);
  }

  V8_INLINE static constexpr internal::Address IntToSmi(int value) {
    return internal::IntToSmi(value);
  }

  V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
    return PlatformSmiTagging::IsValidSmi(value);
  }

  V8_INLINE static int GetInstanceType(const internal::Address obj) {
    typedef internal::Address A;
    A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
#ifdef V8_MAP_PACKING
    map = UnpackMapWord(map);
#endif
    return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
  }

  V8_INLINE static int GetOddballKind(const internal::Address obj) {
    return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
  }

  V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
    int representation = (instance_type & kStringRepresentationAndEncodingMask);
    return representation == kExternalTwoByteRepresentationTag;
  }

  V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
    return *addr & static_cast<uint8_t>(1U << shift);
  }

  V8_INLINE static void UpdateNodeFlag(internal::Address* obj, bool value,
                                       int shift) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
    uint8_t mask = static_cast<uint8_t>(1U << shift);
    *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
  }

  V8_INLINE static uint8_t GetNodeState(internal::Address* obj) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
    return *addr & kNodeStateMask;
  }

  V8_INLINE static void UpdateNodeState(internal::Address* obj,
                                        uint8_t value) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
    *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
  }

  V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
                                        void* data) {
    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
                             kIsolateEmbedderDataOffset +
                             slot * kApiSystemPointerSize;
    *reinterpret_cast<void**>(addr) = data;
  }

  V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
                                         uint32_t slot) {
    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
                             kIsolateEmbedderDataOffset +
                             slot * kApiSystemPointerSize;
    return *reinterpret_cast<void* const*>(addr);
  }

  V8_INLINE static void IncrementLongTasksStatsCounter(v8::Isolate* isolate) {
    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
                             kIsolateLongTaskStatsCounterOffset;
    ++(*reinterpret_cast<size_t*>(addr));
  }

  V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate,
                                              int index) {
    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
                             kIsolateRootsOffset +
                             index * kApiSystemPointerSize;
    return reinterpret_cast<internal::Address*>(addr);
  }
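
  // Hedged usage sketch: embedders normally reach SetEmbedderData /
  // GetEmbedderData through the public wrappers v8::Isolate::SetData and
  // v8::Isolate::GetData, e.g.
  //
  //   isolate->SetData(0, my_state);  // slot must be < kNumIsolateDataSlots
  //   void* state = isolate->GetData(0);
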
  template <typename T>
  V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
                                  int offset) {
    internal::Address addr = heap_object_ptr + offset - kHeapObjectTag;
#ifdef V8_COMPRESS_POINTERS
    if (sizeof(T) > kApiTaggedSize) {
      // TODO(ishell, v8:8875): When pointer compression is enabled, 8-byte
      // fields (external pointers, doubles and BigInt data) are only
      // kTaggedSize aligned, so we have to use an unaligned-pointer-friendly
      // way of accessing them in order to avoid undefined behavior in C++
      // code.
      T r;
      memcpy(&r, reinterpret_cast<void*>(addr), sizeof(T));
      return r;
    }
#endif
    return *reinterpret_cast<const T*>(addr);
  }

  V8_INLINE static internal::Address ReadTaggedPointerField(
      internal::Address heap_object_ptr, int offset) {
#ifdef V8_COMPRESS_POINTERS
    uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
    internal::Address base =
        GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
    return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
#else
    return ReadRawField<internal::Address>(heap_object_ptr, offset);
#endif
  }

  V8_INLINE static internal::Address ReadTaggedSignedField(
      internal::Address heap_object_ptr, int offset) {
#ifdef V8_COMPRESS_POINTERS
    uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
    return static_cast<internal::Address>(static_cast<uintptr_t>(value));
#else
    return ReadRawField<internal::Address>(heap_object_ptr, offset);
#endif
  }

  V8_INLINE static internal::Isolate* GetIsolateForSandbox(
      internal::Address obj) {
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
    return internal::IsolateFromNeverReadOnlySpaceObject(obj);
#else
    // Not used in non-sandbox mode.
    return nullptr;
#endif
  }

  V8_INLINE static Address DecodeExternalPointer(
      const Isolate* isolate, ExternalPointer_t encoded_pointer,
      ExternalPointerTag tag) {
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
    return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag);
#else
    return encoded_pointer;
#endif
  }

  V8_INLINE static internal::Address ReadExternalPointerField(
      internal::Isolate* isolate, internal::Address heap_object_ptr,
      int offset, ExternalPointerTag tag) {
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
    internal::ExternalPointer_t encoded_value =
        ReadRawField<uint32_t>(heap_object_ptr, offset);
    // We currently have to treat zero as nullptr in embedder slots.
    return encoded_value ? DecodeExternalPointer(isolate, encoded_value, tag)
                         : 0;
#else
    return ReadRawField<Address>(heap_object_ptr, offset);
#endif
  }

#ifdef V8_COMPRESS_POINTERS
  V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
      internal::Address addr) {
    return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
  }

  V8_INLINE static internal::Address DecompressTaggedAnyField(
      internal::Address heap_object_ptr, uint32_t value) {
    internal::Address base =
        GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
    return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
  }

#endif  // V8_COMPRESS_POINTERS
};
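
// Worked example of the decompression scheme above (addresses are
// hypothetical): for an on-heap tagged address 0x000012340'0abcdef,
//
//   base  = addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment)
//         == 0x0000'1234'0000'0000
//   field = 0x00cafe10  (compressed 32-bit value)
//   full  = base + field == 0x0000'1234'00ca'fe10
//
// i.e. the cage base is recovered by masking off the low 32 bits of any
// address inside the 4GB cage.
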
// Only perform cast checks for types derived from v8::Data, since
// other types do not implement the Cast method.
template <bool PerformCheck>
struct CastCheck {
  template <class T>
  static void Perform(T* data);
};

template <>
template <class T>
void CastCheck<true>::Perform(T* data) {
  T::Cast(data);
}

template <>
template <class T>
void CastCheck<false>::Perform(T* data) {}

template <class T>
V8_INLINE void PerformCastCheck(T* data) {
  CastCheck<std::is_base_of<Data, T>::value &&
            !std::is_same<Data, std::remove_cv_t<T>>::value>::Perform(data);
}

// A base class for backing stores, which is needed due to vagaries of
// how static casts work with std::shared_ptr.
class BackingStoreBase {};

// The maximum value in enum GarbageCollectionReason, defined in heap.h.
// This is needed for histograms sampling garbage collection reasons.
constexpr int kGarbageCollectionReasonMaxValue = 25;

}  // namespace internal

}  // namespace v8

#endif  // INCLUDE_V8_INTERNAL_H_