1 // Copyright 2020 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
6 #define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
7
8 #include <stdint.h>
9
10 #include <atomic>
11
12 #include "include/cppgc/allocation.h"
13 #include "include/cppgc/internal/gc-info.h"
14 #include "include/cppgc/internal/name-trait.h"
15 #include "src/base/atomic-utils.h"
16 #include "src/base/bit-field.h"
17 #include "src/base/logging.h"
18 #include "src/base/macros.h"
19 #include "src/heap/cppgc/gc-info-table.h"
20 #include "src/heap/cppgc/globals.h"
21
22 namespace cppgc {
23
24 class Visitor;
25
26 namespace internal {
27
28 // HeapObjectHeader contains meta data per object and is prepended to each
29 // object.
30 //
31 // +-----------------+------+------------------------------------------+
32 // | name | bits | |
33 // +-----------------+------+------------------------------------------+
34 // | padding | 32 | Only present on 64-bit platform. |
35 // +-----------------+------+------------------------------------------+
36 // | GCInfoIndex | 14 | |
37 // | unused | 1 | |
38 // | in construction | 1 | In construction encoded as |false|. |
39 // +-----------------+------+------------------------------------------+
40 // | size | 14 | 17 bits because allocations are aligned. |
41 // | unused | 1 | |
42 // | mark bit | 1 | |
43 // +-----------------+------+------------------------------------------+
44 //
45 // Notes:
46 // - See |GCInfoTable| for constraints on GCInfoIndex.
47 // - |size| for regular objects is encoded with 14 bits but can actually
48 // represent sizes up to |kBlinkPageSize| (2^17) because allocations are
49 // always 8 byte aligned (see kAllocationGranularity).
50 // - |size| for large objects is encoded as 0. The size of a large object is
51 // stored in |LargeObjectPage::PayloadSize()|.
52 // - |mark bit| and |in construction| bits are located in separate 16-bit halves
53 // to allow potentially accessing them non-atomically.
class HeapObjectHeader {
 public:
  // Maximum encodable size: 17 bits of byte size are squeezed into the 14-bit
  // size field thanks to 8-byte allocation alignment (see EncodeSize()).
  static constexpr size_t kSizeLog2 = 17;
  static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
  // Large objects store 0 in the header; their real size lives in
  // |LargeObjectPage::PayloadSize()| (see class comment above).
  static constexpr uint16_t kLargeObjectSizeInHeader = 0;

  // Recovers the header from a payload pointer; the header is placed
  // immediately in front of the payload.
  inline static HeapObjectHeader& FromPayload(void* address);
  inline static const HeapObjectHeader& FromPayload(const void* address);

  inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);

  // The payload starts directly after the HeapObjectHeader.
  inline Address Payload() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline GCInfoIndex GetGCInfoIndex() const;

  // Returns the object size in bytes; kLargeObjectSizeInHeader (0) for large
  // objects.
  template <AccessMode mode = AccessMode::kNonAtomic>
  inline size_t GetSize() const;
  inline void SetSize(size_t size);

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline bool IsLargeObject() const;

  // "In construction" is encoded as FullyConstructedField == false, so a
  // freshly constructed header reports in-construction until the bit is set.
  template <AccessMode = AccessMode::kNonAtomic>
  bool IsInConstruction() const;
  inline void MarkAsFullyConstructed();
  // Use MarkObjectAsFullyConstructed() to mark an object as being constructed.

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsMarked() const;
  template <AccessMode = AccessMode::kNonAtomic>
  void Unmark();
  inline bool TryMarkAtomic();

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsYoung() const;

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsFree() const;

  inline bool IsFinalizable() const;
  void Finalize();

  V8_EXPORT_PRIVATE HeapObjectName GetName() const;

  V8_EXPORT_PRIVATE void Trace(Visitor*) const;

 private:
  // Selects which 16-bit half of the header an accessor operates on.
  enum class EncodedHalf : uint8_t { kLow, kHigh };

  // Used in |encoded_high_|.
  using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
  using UnusedField1 = FullyConstructedField::Next<bool, 1>;
  using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
  // Used in |encoded_low_|.
  using MarkBitField = v8::base::BitField16<bool, 0, 1>;
  using UnusedField2 = MarkBitField::Next<bool, 1>;
  // Deliberately unusable: size must go through EncodeSize/DecodeSize, which
  // apply the kAllocationGranularity scaling.
  using SizeField = void;  // Use EncodeSize/DecodeSize instead.

  static constexpr size_t DecodeSize(uint16_t encoded) {
    // Essentially, gets optimized to << 1.
    using SizeField = UnusedField2::Next<size_t, 14>;
    return SizeField::decode(encoded) * kAllocationGranularity;
  }

  static constexpr uint16_t EncodeSize(size_t size) {
    // Essentially, gets optimized to >> 1.
    using SizeField = UnusedField2::Next<size_t, 14>;
    return SizeField::encode(size / kAllocationGranularity);
  }

  V8_EXPORT_PRIVATE void CheckApiConstants();

  template <AccessMode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline uint16_t LoadEncoded() const;
  template <AccessMode mode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline void StoreEncoded(uint16_t bits, uint16_t mask);

#if defined(V8_TARGET_ARCH_64_BIT)
  // Pads the header to 8 bytes on 64-bit targets (see layout table above).
  uint32_t padding_ = 0;
#endif  // defined(V8_TARGET_ARCH_64_BIT)
  uint16_t encoded_high_;
  uint16_t encoded_low_;
};
141
142 // static
FromPayload(void * payload)143 HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
144 return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
145 sizeof(HeapObjectHeader));
146 }
147
148 // static
FromPayload(const void * payload)149 const HeapObjectHeader& HeapObjectHeader::FromPayload(const void* payload) {
150 return *reinterpret_cast<const HeapObjectHeader*>(
151 static_cast<ConstAddress>(payload) - sizeof(HeapObjectHeader));
152 }
153
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_TARGET_ARCH_64_BIT)
  // Reference the padding member so it does not trigger unused warnings.
  USE(padding_);
#endif  // defined(V8_TARGET_ARCH_64_BIT)
  DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
  // |size| must be allocation-aligned (header size equals the granularity).
  DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
  DCHECK_GE(kMaxSize, size);
  // Low half: size only; the mark bit starts out cleared.
  encoded_low_ = EncodeSize(size);
  // Objects may get published to the marker without any other synchronization
  // (e.g., write barrier) in which case the in-construction bit is read
  // concurrently which requires reading encoded_high_ atomically. It is ok if
  // this write is not observed by the marker, since the sweeper sets the
  // in-construction bit to 0 and we can rely on that to guarantee a correct
  // answer when checking if objects are in-construction.
  // Note: FullyConstructedField is intentionally left as false here, so the
  // object starts out "in construction" (see IsInConstruction()).
  v8::base::AsAtomicPtr(&encoded_high_)
      ->store(GCInfoIndexField::encode(gc_info_index),
              std::memory_order_relaxed);
  DCHECK(IsInConstruction());
#ifdef DEBUG
  CheckApiConstants();
#endif  // DEBUG
}
176
Payload()177 Address HeapObjectHeader::Payload() const {
178 return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
179 sizeof(HeapObjectHeader);
180 }
181
182 template <AccessMode mode>
GetGCInfoIndex()183 GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
184 const uint16_t encoded =
185 LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
186 return GCInfoIndexField::decode(encoded);
187 }
188
189 template <AccessMode mode>
GetSize()190 size_t HeapObjectHeader::GetSize() const {
191 // Size is immutable after construction while either marking or sweeping
192 // is running so relaxed load (if mode == kAtomic) is enough.
193 uint16_t encoded_low_value =
194 LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
195 const size_t size = DecodeSize(encoded_low_value);
196 return size;
197 }
198
// Updates the encoded size non-atomically.
// NOTE(review): uses |= rather than a masked read-modify-write, which only
// yields |size| if the size bits currently stored are zero (or a subset of the
// new encoding) — confirm this precondition holds for all callers.
void HeapObjectHeader::SetSize(size_t size) {
  DCHECK(!IsMarked());
  encoded_low_ |= EncodeSize(size);
}
203
204 template <AccessMode mode>
IsLargeObject()205 bool HeapObjectHeader::IsLargeObject() const {
206 return GetSize<mode>() == kLargeObjectSizeInHeader;
207 }
208
209 template <AccessMode mode>
IsInConstruction()210 bool HeapObjectHeader::IsInConstruction() const {
211 const uint16_t encoded =
212 LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
213 return !FullyConstructedField::decode(encoded);
214 }
215
// Sets the fully-constructed bit by delegating to the API-side helper, which
// locates the header from the payload address.
void HeapObjectHeader::MarkAsFullyConstructed() {
  MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
}
219
220 template <AccessMode mode>
IsMarked()221 bool HeapObjectHeader::IsMarked() const {
222 const uint16_t encoded =
223 LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
224 return MarkBitField::decode(encoded);
225 }
226
227 template <AccessMode mode>
Unmark()228 void HeapObjectHeader::Unmark() {
229 DCHECK(IsMarked<mode>());
230 StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
231 MarkBitField::encode(false), MarkBitField::kMask);
232 }
233
TryMarkAtomic()234 bool HeapObjectHeader::TryMarkAtomic() {
235 auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
236 uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
237 const uint16_t new_value = old_value | MarkBitField::encode(true);
238 if (new_value == old_value) {
239 return false;
240 }
241 return atomic_encoded->compare_exchange_strong(old_value, new_value,
242 std::memory_order_relaxed);
243 }
244
245 template <AccessMode mode>
IsYoung()246 bool HeapObjectHeader::IsYoung() const {
247 return !IsMarked<mode>();
248 }
249
250 template <AccessMode mode>
IsFree()251 bool HeapObjectHeader::IsFree() const {
252 return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
253 }
254
IsFinalizable()255 bool HeapObjectHeader::IsFinalizable() const {
256 const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
257 return gc_info.finalize;
258 }
259
260 template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
261 std::memory_order memory_order>
LoadEncoded()262 uint16_t HeapObjectHeader::LoadEncoded() const {
263 const uint16_t& half =
264 part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
265 if (mode == AccessMode::kNonAtomic) return half;
266 return v8::base::AsAtomicPtr(&half)->load(memory_order);
267 }
268
// Writes |bits| into the half selected by |part|, clearing exactly the bits
// covered by |mask| first. |bits| must not extend outside |mask|.
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
  // Caveat: Not all changes to HeapObjectHeader's bitfields go through
  // StoreEncoded. The following have their own implementations and need to be
  // kept in sync:
  // - HeapObjectHeader::TryMarkAtomic
  // - MarkObjectAsFullyConstructed (API)
  DCHECK_EQ(0u, bits & ~mask);
  uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) {
    half = (half & ~mask) | bits;
    return;
  }
  // We don't perform a CAS loop here, assuming that none of the info that
  // shares the same encoded half changes concurrently with this store.
  auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
  uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
  value = (value & ~mask) | bits;
  atomic_encoded->store(value, memory_order);
}
290
291 } // namespace internal
292 } // namespace cppgc
293
294 #endif // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
295