// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_

#include <stdint.h>

#include <atomic>

#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/internal/name-trait.h"
#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/globals.h"

#if defined(CPPGC_CAGED_HEAP)
#include "src/heap/cppgc/caged-heap.h"
#endif  // defined(CPPGC_CAGED_HEAP)

namespace cppgc {

class Visitor;

namespace internal {

// HeapObjectHeader contains metadata per object and is prepended to each
// object.
//
// +-----------------+------+------------------------------------------+
// | name            | bits |                                          |
// +-----------------+------+------------------------------------------+
// | padding         |   32 | Only present on 64-bit platform.         |
// +-----------------+------+------------------------------------------+
// | GCInfoIndex     |   14 |                                          |
// | unused          |    1 |                                          |
// | in construction |    1 | In construction encoded as |false|.      |
// +-----------------+------+------------------------------------------+
// | size            |   15 | 17 bits because allocations are aligned. |
// | mark bit        |    1 |                                          |
// +-----------------+------+------------------------------------------+
//
// Notes:
// - See |GCInfoTable| for constraints on GCInfoIndex.
// - |size| for regular objects is encoded with 15 bits but can actually
//   represent sizes up to |kBlinkPageSize| (2^17) because allocations are
//   always 4 byte aligned (see kAllocationGranularity) on 32bit. 64bit uses
//   8 byte aligned allocations which leaves 1 bit unused.
// - |size| for large objects is encoded as 0. The size of a large object is
//   stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit
//   halves to allow potentially accessing them non-atomically.
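//
// Worked example (added for illustration; the concrete size is made up): on a
// 64-bit build, kAllocationGranularity is 8, so a regular 48-byte allocation
// is encoded as SizeField value 48 / 8 = 6. With the mark bit in bit 0 and
// the size in bits 1-15, an unmarked 48-byte object stores
// encoded_low_ == (6 << 1) == 12, and DecodeSize(12) == (12 >> 1) * 8 == 48.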
class HeapObjectHeader {
 public:
  static constexpr size_t kSizeLog2 = 17;
  static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
  static constexpr uint16_t kLargeObjectSizeInHeader = 0;

  inline static HeapObjectHeader& FromObject(void* address);
  inline static const HeapObjectHeader& FromObject(const void* address);

  inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);

  // The object starts directly after the HeapObjectHeader.
  inline Address ObjectStart() const;
  template <AccessMode mode = AccessMode::kNonAtomic>
  inline Address ObjectEnd() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline GCInfoIndex GetGCInfoIndex() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline size_t AllocatedSize() const;
  inline void SetAllocatedSize(size_t size);

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline size_t ObjectSize() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline bool IsLargeObject() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  bool IsInConstruction() const;
  inline void MarkAsFullyConstructed();
  // Use MarkObjectAsFullyConstructed() to mark an object as being constructed.

  template <AccessMode mode = AccessMode::kNonAtomic>
  bool IsMarked() const;
  template <AccessMode mode = AccessMode::kNonAtomic>
  void Unmark();
  inline bool TryMarkAtomic();
  inline void MarkNonAtomic();

  template <AccessMode mode = AccessMode::kNonAtomic>
  bool IsYoung() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  bool IsFree() const;

  inline bool IsFinalizable() const;
  void Finalize();

#if defined(CPPGC_CAGED_HEAP)
  inline void SetNextUnfinalized(HeapObjectHeader* next);
  inline HeapObjectHeader* GetNextUnfinalized(uintptr_t cage_base) const;
#endif  // defined(CPPGC_CAGED_HEAP)

  V8_EXPORT_PRIVATE HeapObjectName GetName() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  void Trace(Visitor*) const;

 private:
  enum class EncodedHalf : uint8_t { kLow, kHigh };

  // Used in |encoded_high_|.
  using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
  using UnusedField1 = FullyConstructedField::Next<bool, 1>;
  using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
  // Used in |encoded_low_|.
  using MarkBitField = v8::base::BitField16<bool, 0, 1>;
  using SizeField =
      MarkBitField::Next<size_t, 15>;  // Use EncodeSize/DecodeSize instead.

  static constexpr size_t DecodeSize(uint16_t encoded) {
    // Essentially, gets optimized to << 1.
    return SizeField::decode(encoded) * kAllocationGranularity;
  }

  static constexpr uint16_t EncodeSize(size_t size) {
    // Essentially, gets optimized to >> 1.
    return SizeField::encode(size / kAllocationGranularity);
  }

  V8_EXPORT_PRIVATE void CheckApiConstants();

  template <AccessMode mode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline uint16_t LoadEncoded() const;
  template <AccessMode mode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline void StoreEncoded(uint16_t bits, uint16_t mask);

#if defined(V8_TARGET_ARCH_64_BIT)
  // If the cage is enabled, to save on space required by sweeper metadata, we
  // store the list of to-be-finalized objects inlined in HeapObjectHeader.
#if defined(CPPGC_CAGED_HEAP)
  uint32_t next_unfinalized_ = 0;
#else   // !defined(CPPGC_CAGED_HEAP)
  uint32_t padding_ = 0;
#endif  // !defined(CPPGC_CAGED_HEAP)
#endif  // defined(V8_TARGET_ARCH_64_BIT)
  uint16_t encoded_high_;
  uint16_t encoded_low_;
};

static_assert(kAllocationGranularity == sizeof(HeapObjectHeader),
              "sizeof(HeapObjectHeader) must match allocation granularity to "
              "guarantee alignment");
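
// Usage sketch (illustrative only; |raw|, |kAllocationSize|, and
// |gc_info_index| are made-up names, not part of this header): an allocator
// constructs the header at the start of an allocation and hands out the
// address right behind it, so FromObject() recovers the header with constant
// pointer arithmetic:
//
//   Address raw = ...;  // start of a granule-aligned allocation
//   new (raw) HeapObjectHeader(kAllocationSize, gc_info_index);
//   void* object = raw + sizeof(HeapObjectHeader);
//   HeapObjectHeader& header = HeapObjectHeader::FromObject(object);
//   DCHECK_EQ(raw, reinterpret_cast<Address>(&header));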

// static
HeapObjectHeader& HeapObjectHeader::FromObject(void* object) {
  return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(object) -
                                              sizeof(HeapObjectHeader));
}

// static
const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
  return *reinterpret_cast<const HeapObjectHeader*>(
      static_cast<ConstAddress>(object) - sizeof(HeapObjectHeader));
}

HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
  USE(padding_);
#endif  // defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
  DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
  DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
  DCHECK_GE(kMaxSize, size);
  encoded_low_ = EncodeSize(size);
  // Objects may get published to the marker without any other synchronization
  // (e.g., write barrier) in which case the in-construction bit is read
  // concurrently, which requires reading encoded_high_ atomically. It is ok if
  // this write is not observed by the marker, since the sweeper sets the
  // in-construction bit to 0 and we can rely on that to guarantee a correct
  // answer when checking if objects are in construction.
  v8::base::AsAtomicPtr(&encoded_high_)
      ->store(GCInfoIndexField::encode(gc_info_index),
              std::memory_order_relaxed);
  DCHECK(IsInConstruction());
#ifdef DEBUG
  CheckApiConstants();
#endif  // DEBUG
}
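
// Note on the encoding above (added commentary, not in the original): the
// constructor stores only the GCInfoIndex into |encoded_high_|, leaving
// FullyConstructedField as 0. Because "in construction" is encoded as
// |false|, a freshly constructed header reports IsInConstruction() == true
// until MarkObjectAsFullyConstructed() flips the bit.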

Address HeapObjectHeader::ObjectStart() const {
  return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
         sizeof(HeapObjectHeader);
}

template <AccessMode mode>
Address HeapObjectHeader::ObjectEnd() const {
  DCHECK(!IsLargeObject<mode>());
  return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
         AllocatedSize<mode>();
}
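
// Illustrative layout of a regular (non-large) allocation (added for
// clarity):
//
//   this          ObjectStart()                ObjectEnd()
//   |             |                            |
//   +-------------+----------------------------+
//   | header      | object payload             |
//   +-------------+----------------------------+
//   <-------------- AllocatedSize() ----------->
//                 <------- ObjectSize() ------->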

template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return GCInfoIndexField::decode(encoded);
}

template <AccessMode mode>
size_t HeapObjectHeader::AllocatedSize() const {
  // Size is immutable after construction while either marking or sweeping
  // is running, so a relaxed load (if mode == kAtomic) is enough.
  const uint16_t encoded_low_value =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  const size_t size = DecodeSize(encoded_low_value);
  return size;
}

void HeapObjectHeader::SetAllocatedSize(size_t size) {
#if !defined(CPPGC_YOUNG_GENERATION)
  // With sticky bits, marked objects correspond to old objects.
  // TODO(bikineev:1029379): Consider disallowing old/marked objects to be
  // resized.
  DCHECK(!IsMarked());
#endif
  // The object may be marked (i.e. old, in case young generation is enabled).
  // Make sure to not overwrite the mark bit.
  encoded_low_ &= ~SizeField::encode(SizeField::kMax);
  encoded_low_ |= EncodeSize(size);
}

template <AccessMode mode>
size_t HeapObjectHeader::ObjectSize() const {
  // The following DCHECK also fails for large objects.
  DCHECK_GT(AllocatedSize<mode>(), sizeof(HeapObjectHeader));
  return AllocatedSize<mode>() - sizeof(HeapObjectHeader);
}

template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
  return AllocatedSize<mode>() == kLargeObjectSizeInHeader;
}

template <AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return !FullyConstructedField::decode(encoded);
}

void HeapObjectHeader::MarkAsFullyConstructed() {
  MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
      ObjectStart());
}

template <AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  return MarkBitField::decode(encoded);
}

template <AccessMode mode>
void HeapObjectHeader::Unmark() {
  DCHECK(IsMarked<mode>());
  StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
      MarkBitField::encode(false), MarkBitField::kMask);
}

bool HeapObjectHeader::TryMarkAtomic() {
  auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
  uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
  const uint16_t new_value = old_value | MarkBitField::encode(true);
  if (new_value == old_value) {
    return false;
  }
  return atomic_encoded->compare_exchange_strong(old_value, new_value,
                                                 std::memory_order_relaxed);
}

void HeapObjectHeader::MarkNonAtomic() {
  DCHECK(!IsMarked());
  encoded_low_ |= MarkBitField::encode(true);
}
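
// Usage sketch (illustrative only; |worklist| and |header| are made-up names,
// not part of this file): concurrent markers use TryMarkAtomic() so that
// exactly one thread wins the race to mark and trace an object:
//
//   if (header.TryMarkAtomic()) {
//     // This thread claimed the object; push it for tracing.
//     worklist.Push(&header);
//   }  // Otherwise some other marker already marked it.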

template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
  return !IsMarked<mode>();
}

template <AccessMode mode>
bool HeapObjectHeader::IsFree() const {
  return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}

bool HeapObjectHeader::IsFinalizable() const {
  const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
  return gc_info.finalize;
}

#if defined(CPPGC_CAGED_HEAP)
void HeapObjectHeader::SetNextUnfinalized(HeapObjectHeader* next) {
  next_unfinalized_ = CagedHeap::OffsetFromAddress<uint32_t>(next);
}

HeapObjectHeader* HeapObjectHeader::GetNextUnfinalized(
    uintptr_t cage_base) const {
  DCHECK(cage_base);
  DCHECK_EQ(0u,
            CagedHeap::OffsetFromAddress(reinterpret_cast<void*>(cage_base)));
  return next_unfinalized_ ? reinterpret_cast<HeapObjectHeader*>(
                                 cage_base + next_unfinalized_)
                           : nullptr;
}
#endif  // defined(CPPGC_CAGED_HEAP)

template <AccessMode mode>
void HeapObjectHeader::Trace(Visitor* visitor) const {
  const GCInfo& gc_info =
      GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex<mode>());
  return gc_info.trace(visitor, ObjectStart());
}

template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
  const uint16_t& half =
      part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) return half;
  return v8::base::AsAtomicPtr(&half)->load(memory_order);
}

template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
  // Caveat: Not all changes to HeapObjectHeader's bitfields go through
  // StoreEncoded. The following have their own implementations and need to be
  // kept in sync:
  // - HeapObjectHeader::TryMarkAtomic
  // - MarkObjectAsFullyConstructed (API)
  DCHECK_EQ(0u, bits & ~mask);
  uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) {
    half = (half & ~mask) | bits;
    return;
  }
  // We don't perform a CAS loop here, assuming that no two bits sharing the
  // same encoded half are mutated concurrently.
  auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
  uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
  value = (value & ~mask) | bits;
  atomic_encoded->store(value, memory_order);
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_