1 // Copyright 2017 the V8 project authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #ifndef V8_OBJECTS_CODE_H_ 6 #define V8_OBJECTS_CODE_H_ 7 8 #include "src/handler-table.h" 9 #include "src/objects.h" 10 #include "src/objects/fixed-array.h" 11 12 // Has to be the last include (doesn't have include guards): 13 #include "src/objects/object-macros.h" 14 15 namespace v8 { 16 namespace internal { 17 18 class ByteArray; 19 class BytecodeArray; 20 class CodeDataContainer; 21 class MaybeObject; 22 23 namespace interpreter { 24 class Register; 25 } 26 27 // Code describes objects with on-the-fly generated machine code. 28 class Code : public HeapObject, public NeverReadOnlySpaceObject { 29 public: 30 using NeverReadOnlySpaceObject::GetHeap; 31 using NeverReadOnlySpaceObject::GetIsolate; 32 // Opaque data type for encapsulating code flags like kind, inline 33 // cache state, and arguments count. 34 typedef uint32_t Flags; 35 36 #define CODE_KIND_LIST(V) \ 37 V(OPTIMIZED_FUNCTION) \ 38 V(BYTECODE_HANDLER) \ 39 V(STUB) \ 40 V(BUILTIN) \ 41 V(REGEXP) \ 42 V(WASM_FUNCTION) \ 43 V(WASM_TO_JS_FUNCTION) \ 44 V(JS_TO_WASM_FUNCTION) \ 45 V(WASM_INTERPRETER_ENTRY) \ 46 V(C_WASM_ENTRY) 47 48 enum Kind { 49 #define DEFINE_CODE_KIND_ENUM(name) name, 50 CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM) 51 #undef DEFINE_CODE_KIND_ENUM 52 NUMBER_OF_KINDS 53 }; 54 55 static const char* Kind2String(Kind kind); 56 57 #ifdef ENABLE_DISASSEMBLER 58 const char* GetName(Isolate* isolate) const; 59 void PrintBuiltinCode(Isolate* isolate, const char* name); 60 void Disassemble(const char* name, std::ostream& os, 61 Address current_pc = kNullAddress); 62 #endif 63 64 // [instruction_size]: Size of the native instructions, including embedded 65 // data such as the safepoints table. 66 inline int raw_instruction_size() const; 67 inline void set_raw_instruction_size(int value); 68 69 // Returns the size of the native instructions, including embedded 70 // data such as the safepoints table. For off-heap code objects 71 // this may from instruction_size in that this will return the size of the 72 // off-heap instruction stream rather than the on-heap trampoline located 73 // at instruction_start. 74 inline int InstructionSize() const; 75 int OffHeapInstructionSize() const; 76 77 // [relocation_info]: Code relocation information 78 DECL_ACCESSORS(relocation_info, ByteArray) 79 void InvalidateEmbeddedObjects(Heap* heap); 80 81 // [deoptimization_data]: Array containing data for deopt. 82 DECL_ACCESSORS(deoptimization_data, FixedArray) 83 84 // [source_position_table]: ByteArray for the source positions table or 85 // SourcePositionTableWithFrameCache. 86 DECL_ACCESSORS(source_position_table, Object) 87 inline ByteArray* SourcePositionTable() const; 88 89 // [code_data_container]: A container indirection for all mutable fields. 90 DECL_ACCESSORS(code_data_container, CodeDataContainer) 91 92 // [stub_key]: The major/minor key of a code stub. 93 inline uint32_t stub_key() const; 94 inline void set_stub_key(uint32_t key); 95 96 // [next_code_link]: Link for lists of optimized or deoptimized code. 97 // Note that this field is stored in the {CodeDataContainer} to be mutable. 98 inline Object* next_code_link() const; 99 inline void set_next_code_link(Object* value); 100 101 // [constant_pool offset]: Offset of the constant pool. 102 // Valid for FLAG_enable_embedded_constant_pool only 103 inline int constant_pool_offset() const; 104 inline void set_constant_pool_offset(int offset); 105 106 // Unchecked accessors to be used during GC. 107 inline ByteArray* unchecked_relocation_info() const; 108 109 inline int relocation_size() const; 110 111 // [kind]: Access to specific code kind. 112 inline Kind kind() const; 113 114 inline bool is_stub() const; 115 inline bool is_optimized_code() const; 116 inline bool is_wasm_code() const; 117 118 // Testers for interpreter builtins. 119 inline bool is_interpreter_trampoline_builtin() const; 120 121 // Tells whether the code checks the optimization marker in the function's 122 // feedback vector. 123 inline bool checks_optimization_marker() const; 124 125 // Tells whether the outgoing parameters of this code are tagged pointers. 126 inline bool has_tagged_params() const; 127 128 // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the 129 // code object was generated by the TurboFan optimizing compiler. 130 inline bool is_turbofanned() const; 131 132 // [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the 133 // embedded objects in code should be treated weakly. 134 inline bool can_have_weak_objects() const; 135 inline void set_can_have_weak_objects(bool value); 136 137 // [is_construct_stub]: For kind BUILTIN, tells whether the code object 138 // represents a hand-written construct stub 139 // (e.g., NumberConstructor_ConstructStub). 140 inline bool is_construct_stub() const; 141 inline void set_is_construct_stub(bool value); 142 143 // [builtin_index]: For builtins, tells which builtin index the code object 144 // has. The builtin index is a non-negative integer for builtins, and -1 145 // otherwise. 146 inline int builtin_index() const; 147 inline void set_builtin_index(int id); 148 inline bool is_builtin() const; 149 150 inline bool has_safepoint_info() const; 151 152 // [stack_slots]: If {has_safepoint_info()}, the number of stack slots 153 // reserved in the code prologue. 154 inline int stack_slots() const; 155 156 // [safepoint_table_offset]: If {has_safepoint_info()}, the offset in the 157 // instruction stream where the safepoint table starts. 158 inline int safepoint_table_offset() const; 159 inline void set_safepoint_table_offset(int offset); 160 161 // [handler_table_offset]: The offset in the instruction stream where the 162 // exception handler table starts. 163 inline int handler_table_offset() const; 164 inline void set_handler_table_offset(int offset); 165 166 // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether 167 // the code is going to be deoptimized because of dead embedded maps. 168 inline bool marked_for_deoptimization() const; 169 inline void set_marked_for_deoptimization(bool flag); 170 171 // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether 172 // the code was already deoptimized. 173 inline bool deopt_already_counted() const; 174 inline void set_deopt_already_counted(bool flag); 175 176 // [is_promise_rejection]: For kind BUILTIN tells whether the 177 // exception thrown by the code will lead to promise rejection or 178 // uncaught if both this and is_exception_caught is set. 179 // Use GetBuiltinCatchPrediction to access this. 180 inline void set_is_promise_rejection(bool flag); 181 182 // [is_exception_caught]: For kind BUILTIN tells whether the 183 // exception thrown by the code will be caught internally or 184 // uncaught if both this and is_promise_rejection is set. 185 // Use GetBuiltinCatchPrediction to access this. 186 inline void set_is_exception_caught(bool flag); 187 188 // [is_off_heap_trampoline]: For kind BUILTIN tells whether 189 // this is a trampoline to an off-heap builtin. 190 inline bool is_off_heap_trampoline() const; 191 192 // [constant_pool]: The constant pool for this function. 193 inline Address constant_pool() const; 194 195 // Get the safepoint entry for the given pc. 196 SafepointEntry GetSafepointEntry(Address pc); 197 198 // The entire code object including its header is copied verbatim to the 199 // snapshot so that it can be written in one, fast, memcpy during 200 // deserialization. The deserializer will overwrite some pointers, rather 201 // like a runtime linker, but the random allocation addresses used in the 202 // mksnapshot process would still be present in the unlinked snapshot data, 203 // which would make snapshot production non-reproducible. This method wipes 204 // out the to-be-overwritten header data for reproducible snapshots. 205 inline void WipeOutHeader(); 206 207 // Clear uninitialized padding space. This ensures that the snapshot content 208 // is deterministic. 209 inline void clear_padding(); 210 // Initialize the flags field. Similar to clear_padding above this ensure that 211 // the snapshot content is deterministic. 212 inline void initialize_flags(Kind kind, bool has_unwinding_info, 213 bool is_turbofanned, int stack_slots, 214 bool is_off_heap_trampoline); 215 216 // Convert a target address into a code object. 217 static inline Code* GetCodeFromTargetAddress(Address address); 218 219 // Convert an entry address into an object. 220 static inline Object* GetObjectFromEntryAddress(Address location_of_address); 221 222 // Convert a code entry into an object. 223 static inline Object* GetObjectFromCodeEntry(Address code_entry); 224 225 // Returns the address of the first instruction. 226 inline Address raw_instruction_start() const; 227 228 // Returns the address of the first instruction. For off-heap code objects 229 // this differs from instruction_start (which would point to the off-heap 230 // trampoline instead). 231 inline Address InstructionStart() const; 232 Address OffHeapInstructionStart() const; 233 234 // Returns the address right after the last instruction. 235 inline Address raw_instruction_end() const; 236 237 // Returns the address right after the last instruction. For off-heap code 238 // objects this differs from instruction_end (which would point to the 239 // off-heap trampoline instead). 240 inline Address InstructionEnd() const; 241 Address OffHeapInstructionEnd() const; 242 243 // Returns the size of the instructions, padding, relocation and unwinding 244 // information. 245 inline int body_size() const; 246 247 // Returns the size of code and its metadata. This includes the size of code 248 // relocation information, deoptimization data and handler table. 249 inline int SizeIncludingMetadata() const; 250 251 // Returns the address of the first relocation info (read backwards!). 252 inline byte* relocation_start() const; 253 254 // Returns the address right after the relocation info (read backwards!). 255 inline byte* relocation_end() const; 256 257 // [has_unwinding_info]: Whether this code object has unwinding information. 258 // If it doesn't, unwinding_information_start() will point to invalid data. 259 // 260 // The body of all code objects has the following layout. 261 // 262 // +--------------------------+ <-- raw_instruction_start() 263 // | instructions | 264 // | ... | 265 // +--------------------------+ 266 // | relocation info | 267 // | ... | 268 // +--------------------------+ <-- raw_instruction_end() 269 // 270 // If has_unwinding_info() is false, raw_instruction_end() points to the first 271 // memory location after the end of the code object. Otherwise, the body 272 // continues as follows: 273 // 274 // +--------------------------+ 275 // | padding to the next | 276 // | 8-byte aligned address | 277 // +--------------------------+ <-- raw_instruction_end() 278 // | [unwinding_info_size] | 279 // | as uint64_t | 280 // +--------------------------+ <-- unwinding_info_start() 281 // | unwinding info | 282 // | ... | 283 // +--------------------------+ <-- unwinding_info_end() 284 // 285 // and unwinding_info_end() points to the first memory location after the end 286 // of the code object. 287 // 288 inline bool has_unwinding_info() const; 289 290 // [unwinding_info_size]: Size of the unwinding information. 291 inline int unwinding_info_size() const; 292 inline void set_unwinding_info_size(int value); 293 294 // Returns the address of the unwinding information, if any. 295 inline Address unwinding_info_start() const; 296 297 // Returns the address right after the end of the unwinding information. 298 inline Address unwinding_info_end() const; 299 300 // Code entry point. 301 inline Address entry() const; 302 303 // Returns true if pc is inside this object's instructions. 304 inline bool contains(Address pc); 305 306 // Relocate the code by delta bytes. Called to signal that this code 307 // object has been moved by delta bytes. 308 void Relocate(intptr_t delta); 309 310 // Migrate code described by desc. 311 void CopyFrom(Heap* heap, const CodeDesc& desc); 312 313 // Migrate code from desc without flushing the instruction cache. 314 void CopyFromNoFlush(Heap* heap, const CodeDesc& desc); 315 316 // Flushes the instruction cache for the executable instructions of this code 317 // object. 318 void FlushICache() const; 319 320 // Returns the object size for a given body (used for allocation). SizeFor(int body_size)321 static int SizeFor(int body_size) { 322 DCHECK_SIZE_TAG_ALIGNED(body_size); 323 return RoundUp(kHeaderSize + body_size, kCodeAlignment); 324 } 325 326 // Calculate the size of the code object to report for log events. This takes 327 // the layout of the code object into account. 328 inline int ExecutableSize() const; 329 330 DECL_CAST(Code) 331 332 // Dispatched behavior. 333 inline int CodeSize() const; 334 335 DECL_PRINTER(Code) 336 DECL_VERIFIER(Code) 337 338 void PrintDeoptLocation(FILE* out, const char* str, Address pc); 339 bool CanDeoptAt(Address pc); 340 341 void SetMarkedForDeoptimization(const char* reason); 342 343 inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction(); 344 345 #ifdef DEBUG 346 enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers }; 347 void VerifyEmbeddedObjects(Isolate* isolate, 348 VerifyMode mode = kNoContextRetainingPointers); 349 #endif // DEBUG 350 351 bool IsIsolateIndependent(Isolate* isolate); 352 353 inline bool CanContainWeakObjects(); 354 355 inline bool IsWeakObject(Object* object); 356 357 static inline bool IsWeakObjectInOptimizedCode(Object* object); 358 359 // Return true if the function is inlined in the code. 360 bool Inlines(SharedFunctionInfo* sfi); 361 362 class OptimizedCodeIterator { 363 public: 364 explicit OptimizedCodeIterator(Isolate* isolate); 365 Code* Next(); 366 367 private: 368 Context* next_context_; 369 Code* current_code_; 370 Isolate* isolate_; 371 372 DisallowHeapAllocation no_gc; 373 DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator) 374 }; 375 376 static const int kConstantPoolSize = 377 FLAG_enable_embedded_constant_pool ? kIntSize : 0; 378 379 // Layout description. 380 static const int kRelocationInfoOffset = HeapObject::kHeaderSize; 381 static const int kDeoptimizationDataOffset = 382 kRelocationInfoOffset + kPointerSize; 383 static const int kSourcePositionTableOffset = 384 kDeoptimizationDataOffset + kPointerSize; 385 static const int kCodeDataContainerOffset = 386 kSourcePositionTableOffset + kPointerSize; 387 static const int kInstructionSizeOffset = 388 kCodeDataContainerOffset + kPointerSize; 389 static const int kFlagsOffset = kInstructionSizeOffset + kIntSize; 390 static const int kSafepointTableOffsetOffset = kFlagsOffset + kIntSize; 391 static const int kHandlerTableOffsetOffset = 392 kSafepointTableOffsetOffset + kIntSize; 393 static const int kStubKeyOffset = kHandlerTableOffsetOffset + kIntSize; 394 static const int kConstantPoolOffset = kStubKeyOffset + kIntSize; 395 static const int kBuiltinIndexOffset = 396 kConstantPoolOffset + kConstantPoolSize; 397 static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize; 398 399 // Add padding to align the instruction start following right after 400 // the Code object header. 401 static const int kHeaderSize = 402 (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask; 403 404 // Data or code not directly visited by GC directly starts here. 405 // The serializer needs to copy bytes starting from here verbatim. 406 // Objects embedded into code is visited via reloc info. 407 static const int kDataStart = kInstructionSizeOffset; 408 409 inline int GetUnwindingInfoSizeOffset() const; 410 411 class BodyDescriptor; 412 413 // Flags layout. BitField<type, shift, size>. 414 #define CODE_FLAGS_BIT_FIELDS(V, _) \ 415 V(HasUnwindingInfoField, bool, 1, _) \ 416 V(KindField, Kind, 5, _) \ 417 V(IsTurbofannedField, bool, 1, _) \ 418 V(StackSlotsField, int, 24, _) \ 419 V(IsOffHeapTrampoline, bool, 1, _) 420 DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS) 421 #undef CODE_FLAGS_BIT_FIELDS 422 static_assert(NUMBER_OF_KINDS <= KindField::kMax, "Code::KindField size"); 423 static_assert(IsOffHeapTrampoline::kNext <= 32, 424 "Code::flags field exhausted"); 425 426 // KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION) 427 #define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \ 428 V(MarkedForDeoptimizationField, bool, 1, _) \ 429 V(DeoptAlreadyCountedField, bool, 1, _) \ 430 V(CanHaveWeakObjectsField, bool, 1, _) \ 431 V(IsConstructStubField, bool, 1, _) \ 432 V(IsPromiseRejectionField, bool, 1, _) \ 433 V(IsExceptionCaughtField, bool, 1, _) 434 DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS) 435 #undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS 436 static_assert(IsExceptionCaughtField::kNext <= 32, "KindSpecificFlags full"); 437 438 // The {marked_for_deoptimization} field is accessed from generated code. 439 static const int kMarkedForDeoptimizationBit = 440 MarkedForDeoptimizationField::kShift; 441 442 static const int kArgumentsBits = 16; 443 // Reserve one argument count value as the "don't adapt arguments" sentinel. 444 static const int kMaxArguments = (1 << kArgumentsBits) - 2; 445 446 private: 447 friend class RelocIterator; 448 449 bool is_promise_rejection() const; 450 bool is_exception_caught() const; 451 452 DISALLOW_IMPLICIT_CONSTRUCTORS(Code); 453 }; 454 455 // CodeDataContainer is a container for all mutable fields associated with its 456 // referencing {Code} object. Since {Code} objects reside on write-protected 457 // pages within the heap, its header fields need to be immutable. There always 458 // is a 1-to-1 relation between {Code} and {CodeDataContainer}, the referencing 459 // field {Code::code_data_container} itself is immutable. 460 class CodeDataContainer : public HeapObject, public NeverReadOnlySpaceObject { 461 public: 462 using NeverReadOnlySpaceObject::GetHeap; 463 using NeverReadOnlySpaceObject::GetIsolate; 464 465 DECL_ACCESSORS(next_code_link, Object) 466 DECL_INT_ACCESSORS(kind_specific_flags) 467 468 // Clear uninitialized padding space. This ensures that the snapshot content 469 // is deterministic. 470 inline void clear_padding(); 471 472 DECL_CAST(CodeDataContainer) 473 474 // Dispatched behavior. 475 DECL_PRINTER(CodeDataContainer) 476 DECL_VERIFIER(CodeDataContainer) 477 478 static const int kNextCodeLinkOffset = HeapObject::kHeaderSize; 479 static const int kKindSpecificFlagsOffset = 480 kNextCodeLinkOffset + kPointerSize; 481 static const int kUnalignedSize = kKindSpecificFlagsOffset + kIntSize; 482 static const int kSize = OBJECT_POINTER_ALIGN(kUnalignedSize); 483 484 // During mark compact we need to take special care for weak fields. 485 static const int kPointerFieldsStrongEndOffset = kNextCodeLinkOffset; 486 static const int kPointerFieldsWeakEndOffset = kKindSpecificFlagsOffset; 487 488 // Ignores weakness. 489 typedef FixedBodyDescriptor<HeapObject::kHeaderSize, 490 kPointerFieldsWeakEndOffset, kSize> 491 BodyDescriptor; 492 493 // Respects weakness. 494 typedef FixedBodyDescriptor<HeapObject::kHeaderSize, 495 kPointerFieldsStrongEndOffset, kSize> 496 BodyDescriptorWeak; 497 498 private: 499 DISALLOW_IMPLICIT_CONSTRUCTORS(CodeDataContainer); 500 }; 501 502 class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject { 503 public: 504 using NeverReadOnlySpaceObject::GetHeap; 505 using NeverReadOnlySpaceObject::GetIsolate; 506 507 // All code kinds and INTERPRETED_FUNCTION. 508 enum Kind { 509 #define DEFINE_CODE_KIND_ENUM(name) name, 510 CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM) 511 #undef DEFINE_CODE_KIND_ENUM 512 INTERPRETED_FUNCTION, 513 NUMBER_OF_KINDS 514 }; 515 516 static const char* Kind2String(Kind kind); 517 518 int SourcePosition(int offset); 519 int SourceStatementPosition(int offset); 520 521 // Returns the address of the first instruction. 522 inline Address raw_instruction_start(); 523 524 // Returns the address of the first instruction. For off-heap code objects 525 // this differs from instruction_start (which would point to the off-heap 526 // trampoline instead). 527 inline Address InstructionStart(); 528 529 // Returns the address right after the last instruction. 530 inline Address raw_instruction_end(); 531 532 // Returns the address right after the last instruction. For off-heap code 533 // objects this differs from instruction_end (which would point to the 534 // off-heap trampoline instead). 535 inline Address InstructionEnd(); 536 537 // Returns the size of the code instructions. 538 inline int raw_instruction_size(); 539 540 // Returns the size of the native instructions, including embedded 541 // data such as the safepoints table. For off-heap code objects 542 // this may from instruction_size in that this will return the size of the 543 // off-heap instruction stream rather than the on-heap trampoline located 544 // at instruction_start. 545 inline int InstructionSize(); 546 547 // Return the source position table. 548 inline ByteArray* source_position_table(); 549 550 inline Object* stack_frame_cache(); 551 static void SetStackFrameCache(Handle<AbstractCode> abstract_code, 552 Handle<SimpleNumberDictionary> cache); 553 void DropStackFrameCache(); 554 555 // Returns the size of instructions and the metadata. 556 inline int SizeIncludingMetadata(); 557 558 // Returns true if pc is inside this object's instructions. 559 inline bool contains(Address pc); 560 561 // Returns the AbstractCode::Kind of the code. 562 inline Kind kind(); 563 564 // Calculate the size of the code object to report for log events. This takes 565 // the layout of the code object into account. 566 inline int ExecutableSize(); 567 568 DECL_CAST(AbstractCode) 569 inline Code* GetCode(); 570 inline BytecodeArray* GetBytecodeArray(); 571 572 // Max loop nesting marker used to postpose OSR. We don't take loop 573 // nesting that is deeper than 5 levels into account. 574 static const int kMaxLoopNestingMarker = 6; 575 }; 576 577 // Dependent code is a singly linked list of weak fixed arrays. Each array 578 // contains weak pointers to code objects for one dependent group. The suffix of 579 // the array can be filled with the undefined value if the number of codes is 580 // less than the length of the array. 581 // 582 // +------+-----------------+--------+--------+-----+--------+-----------+-----+ 583 // | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... | 584 // +------+-----------------+--------+--------+-----+--------+-----------+-----+ 585 // | 586 // V 587 // +------+-----------------+--------+--------+-----+--------+-----------+-----+ 588 // | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... | 589 // +------+-----------------+--------+--------+-----+--------+-----------+-----+ 590 // | 591 // V 592 // empty_weak_fixed_array() 593 // 594 // The list of weak fixed arrays is ordered by dependency groups. 595 596 class DependentCode : public WeakFixedArray { 597 public: 598 DECL_CAST(DependentCode) 599 600 enum DependencyGroup { 601 // Group of code that embed a transition to this map, and depend on being 602 // deoptimized when the transition is replaced by a new version. 603 kTransitionGroup, 604 // Group of code that omit run-time prototype checks for prototypes 605 // described by this map. The group is deoptimized whenever an object 606 // described by this map changes shape (and transitions to a new map), 607 // possibly invalidating the assumptions embedded in the code. 608 kPrototypeCheckGroup, 609 // Group of code that depends on global property values in property cells 610 // not being changed. 611 kPropertyCellChangedGroup, 612 // Group of code that omit run-time checks for field(s) introduced by 613 // this map, i.e. for the field type. 614 kFieldOwnerGroup, 615 // Group of code that omit run-time type checks for initial maps of 616 // constructors. 617 kInitialMapChangedGroup, 618 // Group of code that depends on tenuring information in AllocationSites 619 // not being changed. 620 kAllocationSiteTenuringChangedGroup, 621 // Group of code that depends on element transition information in 622 // AllocationSites not being changed. 623 kAllocationSiteTransitionChangedGroup 624 }; 625 626 // Register a code dependency of {cell} on {object}. 627 static void InstallDependency(Isolate* isolate, MaybeObjectHandle code, 628 Handle<HeapObject> object, 629 DependencyGroup group); 630 631 bool Contains(DependencyGroup group, MaybeObject* code); 632 bool IsEmpty(DependencyGroup group); 633 634 void DeoptimizeDependentCodeGroup(Isolate* isolate, DependencyGroup group); 635 636 bool MarkCodeForDeoptimization(Isolate* isolate, DependencyGroup group); 637 638 // The following low-level accessors are exposed only for tests. 639 inline DependencyGroup group(); 640 inline MaybeObject* object_at(int i); 641 inline int count(); 642 inline DependentCode* next_link(); 643 644 private: 645 static const char* DependencyGroupName(DependencyGroup group); 646 647 // Get/Set {object}'s {DependentCode}. 648 static DependentCode* GetDependentCode(Handle<HeapObject> object); 649 static void SetDependentCode(Handle<HeapObject> object, 650 Handle<DependentCode> dep); 651 652 static Handle<DependentCode> New(Isolate* isolate, DependencyGroup group, 653 MaybeObjectHandle object, 654 Handle<DependentCode> next); 655 static Handle<DependentCode> EnsureSpace(Isolate* isolate, 656 Handle<DependentCode> entries); 657 static Handle<DependentCode> InsertWeakCode(Isolate* isolate, 658 Handle<DependentCode> entries, 659 DependencyGroup group, 660 MaybeObjectHandle code); 661 662 // Compact by removing cleared weak cells and return true if there was 663 // any cleared weak cell. 664 bool Compact(); 665 Grow(int number_of_entries)666 static int Grow(int number_of_entries) { 667 if (number_of_entries < 5) return number_of_entries + 1; 668 return number_of_entries * 5 / 4; 669 } 670 671 static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1; 672 static const int kNextLinkIndex = 0; 673 static const int kFlagsIndex = 1; 674 static const int kCodesStartIndex = 2; 675 676 inline void set_next_link(DependentCode* next); 677 inline void set_count(int value); 678 inline void set_object_at(int i, MaybeObject* object); 679 inline void clear_at(int i); 680 inline void copy(int from, int to); 681 682 inline int flags(); 683 inline void set_flags(int flags); 684 class GroupField : public BitField<int, 0, 3> {}; 685 class CountField : public BitField<int, 3, 27> {}; 686 STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1); 687 }; 688 689 // BytecodeArray represents a sequence of interpreter bytecodes. 690 class BytecodeArray : public FixedArrayBase { 691 public: 692 enum Age { 693 kNoAgeBytecodeAge = 0, 694 kQuadragenarianBytecodeAge, 695 kQuinquagenarianBytecodeAge, 696 kSexagenarianBytecodeAge, 697 kSeptuagenarianBytecodeAge, 698 kOctogenarianBytecodeAge, 699 kAfterLastBytecodeAge, 700 kFirstBytecodeAge = kNoAgeBytecodeAge, 701 kLastBytecodeAge = kAfterLastBytecodeAge - 1, 702 kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1, 703 kIsOldBytecodeAge = kSexagenarianBytecodeAge 704 }; 705 SizeFor(int length)706 static int SizeFor(int length) { 707 return OBJECT_POINTER_ALIGN(kHeaderSize + length); 708 } 709 710 // Setter and getter 711 inline byte get(int index); 712 inline void set(int index, byte value); 713 714 // Returns data start address. 715 inline Address GetFirstBytecodeAddress(); 716 717 // Accessors for frame size. 718 inline int frame_size() const; 719 inline void set_frame_size(int frame_size); 720 721 // Accessor for register count (derived from frame_size). 722 inline int register_count() const; 723 724 // Accessors for parameter count (including implicit 'this' receiver). 725 inline int parameter_count() const; 726 inline void set_parameter_count(int number_of_parameters); 727 728 // Register used to pass the incoming new.target or generator object from the 729 // fucntion call. 730 inline interpreter::Register incoming_new_target_or_generator_register() 731 const; 732 inline void set_incoming_new_target_or_generator_register( 733 interpreter::Register incoming_new_target_or_generator_register); 734 735 // Accessors for profiling count. 736 inline int interrupt_budget() const; 737 inline void set_interrupt_budget(int interrupt_budget); 738 739 // Accessors for OSR loop nesting level. 740 inline int osr_loop_nesting_level() const; 741 inline void set_osr_loop_nesting_level(int depth); 742 743 // Accessors for bytecode's code age. 744 inline Age bytecode_age() const; 745 inline void set_bytecode_age(Age age); 746 747 // Accessors for the constant pool. 748 DECL_ACCESSORS(constant_pool, FixedArray) 749 750 // Accessors for handler table containing offsets of exception handlers. 751 DECL_ACCESSORS(handler_table, ByteArray) 752 753 // Accessors for source position table containing mappings between byte code 754 // offset and source position or SourcePositionTableWithFrameCache. 755 DECL_ACCESSORS(source_position_table, Object) 756 757 inline ByteArray* SourcePositionTable(); 758 inline void ClearFrameCacheFromSourcePositionTable(); 759 760 DECL_CAST(BytecodeArray) 761 762 // Dispatched behavior. 763 inline int BytecodeArraySize(); 764 765 inline int raw_instruction_size(); 766 767 // Returns the size of bytecode and its metadata. This includes the size of 768 // bytecode, constant pool, source position table, and handler table. 769 inline int SizeIncludingMetadata(); 770 771 int SourcePosition(int offset); 772 int SourceStatementPosition(int offset); 773 774 DECL_PRINTER(BytecodeArray) 775 DECL_VERIFIER(BytecodeArray) 776 777 void Disassemble(std::ostream& os); 778 779 void CopyBytecodesTo(BytecodeArray* to); 780 781 // Bytecode aging 782 bool IsOld() const; 783 void MakeOlder(); 784 785 // Clear uninitialized padding space. This ensures that the snapshot content 786 // is deterministic. 787 inline void clear_padding(); 788 789 // Layout description. 790 #define BYTECODE_ARRAY_FIELDS(V) \ 791 /* Pointer fields. */ \ 792 V(kConstantPoolOffset, kPointerSize) \ 793 V(kHandlerTableOffset, kPointerSize) \ 794 V(kSourcePositionTableOffset, kPointerSize) \ 795 V(kFrameSizeOffset, kIntSize) \ 796 V(kParameterSizeOffset, kIntSize) \ 797 V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \ 798 V(kInterruptBudgetOffset, kIntSize) \ 799 V(kOSRNestingLevelOffset, kCharSize) \ 800 V(kBytecodeAgeOffset, kCharSize) \ 801 /* Total size. */ \ 802 V(kHeaderSize, 0) 803 804 DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize, 805 BYTECODE_ARRAY_FIELDS) 806 #undef BYTECODE_ARRAY_FIELDS 807 808 // Maximal memory consumption for a single BytecodeArray. 809 static const int kMaxSize = 512 * MB; 810 // Maximal length of a single BytecodeArray. 811 static const int kMaxLength = kMaxSize - kHeaderSize; 812 813 class BodyDescriptor; 814 // No weak fields. 815 typedef BodyDescriptor BodyDescriptorWeak; 816 817 private: 818 DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray); 819 }; 820 821 // DeoptimizationData is a fixed array used to hold the deoptimization data for 822 // optimized code. It also contains information about functions that were 823 // inlined. If N different functions were inlined then the first N elements of 824 // the literal array will contain these functions. 825 // 826 // It can be empty. 827 class DeoptimizationData : public FixedArray { 828 public: 829 // Layout description. Indices in the array. 830 static const int kTranslationByteArrayIndex = 0; 831 static const int kInlinedFunctionCountIndex = 1; 832 static const int kLiteralArrayIndex = 2; 833 static const int kOsrBytecodeOffsetIndex = 3; 834 static const int kOsrPcOffsetIndex = 4; 835 static const int kOptimizationIdIndex = 5; 836 static const int kSharedFunctionInfoIndex = 6; 837 static const int kInliningPositionsIndex = 7; 838 static const int kFirstDeoptEntryIndex = 8; 839 840 // Offsets of deopt entry elements relative to the start of the entry. 841 static const int kBytecodeOffsetRawOffset = 0; 842 static const int kTranslationIndexOffset = 1; 843 static const int kPcOffset = 2; 844 static const int kDeoptEntrySize = 3; 845 846 // Simple element accessors. 847 #define DECL_ELEMENT_ACCESSORS(name, type) \ 848 inline type* name(); \ 849 inline void Set##name(type* value); 850 851 DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray) 852 DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi) 853 DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray) 854 DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi) 855 DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi) 856 DECL_ELEMENT_ACCESSORS(OptimizationId, Smi) 857 DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object) 858 DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>) 859 860 #undef DECL_ELEMENT_ACCESSORS 861 862 // Accessors for elements of the ith deoptimization entry. 863 #define DECL_ENTRY_ACCESSORS(name, type) \ 864 inline type* name(int i); \ 865 inline void Set##name(int i, type* value); 866 867 DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi) 868 DECL_ENTRY_ACCESSORS(TranslationIndex, Smi) 869 DECL_ENTRY_ACCESSORS(Pc, Smi) 870 871 #undef DECL_ENTRY_ACCESSORS 872 873 inline BailoutId BytecodeOffset(int i); 874 875 inline void SetBytecodeOffset(int i, BailoutId value); 876 877 inline int DeoptCount(); 878 879 static const int kNotInlinedIndex = -1; 880 881 // Returns the inlined function at the given position in LiteralArray, or the 882 // outer function if index == kNotInlinedIndex. 883 class SharedFunctionInfo* GetInlinedFunction(int index); 884 885 // Allocates a DeoptimizationData. 886 static Handle<DeoptimizationData> New(Isolate* isolate, int deopt_entry_count, 887 PretenureFlag pretenure); 888 889 // Return an empty DeoptimizationData. 890 static Handle<DeoptimizationData> Empty(Isolate* isolate); 891 892 DECL_CAST(DeoptimizationData) 893 894 #ifdef ENABLE_DISASSEMBLER 895 void DeoptimizationDataPrint(std::ostream& os); // NOLINT 896 #endif 897 898 private: IndexForEntry(int i)899 static int IndexForEntry(int i) { 900 return kFirstDeoptEntryIndex + (i * kDeoptEntrySize); 901 } 902 LengthFor(int entry_count)903 static int LengthFor(int entry_count) { return IndexForEntry(entry_count); } 904 }; 905 906 } // namespace internal 907 } // namespace v8 908 909 #include "src/objects/object-macros-undef.h" 910 911 #endif // V8_OBJECTS_CODE_H_ 912