// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_

#include <stdint.h>

#include <atomic>

#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/internal/name-trait.h"
#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/globals.h"

#if defined(CPPGC_CAGED_HEAP)
#include "src/heap/cppgc/caged-heap.h"
#endif  // defined(CPPGC_CAGED_HEAP)

namespace cppgc {

class Visitor;

namespace internal {
// HeapObjectHeader contains metadata per object and is prepended to each
// object.
//
// +-----------------+------+------------------------------------------+
// | name            | bits |                                          |
// +-----------------+------+------------------------------------------+
// | padding         |  32  | Only present on 64-bit platform.         |
// +-----------------+------+------------------------------------------+
// | GCInfoIndex     |  14  |                                          |
// | unused          |   1  |                                          |
// | in construction |   1  | In construction encoded as |false|.      |
// +-----------------+------+------------------------------------------+
// | size            |  15  | 17 bits because allocations are aligned. |
// | mark bit        |   1  |                                          |
// +-----------------+------+------------------------------------------+
//
// Notes:
// - See |GCInfoTable| for constraints on GCInfoIndex.
// - |size| for regular objects is encoded with 15 bits but can actually
//   represent sizes up to |kBlinkPageSize| (2^17) because allocations are
//   always 4-byte aligned (see kAllocationGranularity) on 32-bit platforms.
//   64-bit platforms use 8-byte aligned allocations, which leaves 1 bit
//   unused.
// - |size| for large objects is encoded as 0. The size of a large object is
//   stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit
//   halves to allow potentially accessing them non-atomically.
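//
// Usage sketch (illustrative only, not part of the original header): given a
// payload pointer |object| of a regular (non-large) object, the header can be
// recovered and queried as follows:
//
//   HeapObjectHeader& header = HeapObjectHeader::FromObject(object);
//   DCHECK(!header.IsLargeObject());
//   const size_t payload_size = header.ObjectSize();
//   DCHECK_EQ(header.ObjectStart() + payload_size, header.ObjectEnd());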
class HeapObjectHeader {
 public:
  static constexpr size_t kSizeLog2 = 17;
  static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
  static constexpr uint16_t kLargeObjectSizeInHeader = 0;

  inline static HeapObjectHeader& FromObject(void* address);
  inline static const HeapObjectHeader& FromObject(const void* address);

  inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);

  // The object starts directly after the HeapObjectHeader.
  inline Address ObjectStart() const;
  template <AccessMode mode = AccessMode::kNonAtomic>
  inline Address ObjectEnd() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline GCInfoIndex GetGCInfoIndex() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline size_t AllocatedSize() const;
  inline void SetAllocatedSize(size_t size);

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline size_t ObjectSize() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline bool IsLargeObject() const;

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsInConstruction() const;
  inline void MarkAsFullyConstructed();
  // Use MarkObjectAsFullyConstructed() to mark an object as fully
  // constructed.

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsMarked() const;
  template <AccessMode = AccessMode::kNonAtomic>
  void Unmark();
  inline bool TryMarkAtomic();

  inline void MarkNonAtomic();

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsYoung() const;

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsFree() const;

  inline bool IsFinalizable() const;
  void Finalize();

#if defined(CPPGC_CAGED_HEAP)
  inline void SetNextUnfinalized(HeapObjectHeader* next);
  inline HeapObjectHeader* GetNextUnfinalized(uintptr_t cage_base) const;
#endif  // defined(CPPGC_CAGED_HEAP)

  V8_EXPORT_PRIVATE HeapObjectName GetName() const;

  template <AccessMode = AccessMode::kNonAtomic>
  void Trace(Visitor*) const;

 private:
  enum class EncodedHalf : uint8_t { kLow, kHigh };

  // Used in |encoded_high_|.
  using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
  using UnusedField1 = FullyConstructedField::Next<bool, 1>;
  using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
  // Used in |encoded_low_|.
  using MarkBitField = v8::base::BitField16<bool, 0, 1>;
  using SizeField =
      MarkBitField::Next<size_t, 15>;  // Use EncodeSize/DecodeSize instead.

  static constexpr size_t DecodeSize(uint16_t encoded) {
    // Essentially, gets optimized to << 1.
    return SizeField::decode(encoded) * kAllocationGranularity;
  }

  static constexpr uint16_t EncodeSize(size_t size) {
    // Essentially, gets optimized to >> 1.
    return SizeField::encode(size / kAllocationGranularity);
  }
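
  // Worked example (illustrative, assuming the 8-byte allocation granularity
  // of 64-bit builds): EncodeSize(48) == SizeField::encode(48 / 8), which
  // places 6 in bits 1-15 (bit 0 is the mark bit), yielding 0x000c;
  // DecodeSize(0x000c) == 6 * 8 == 48 round-trips the value.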

  V8_EXPORT_PRIVATE void CheckApiConstants();

  template <AccessMode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline uint16_t LoadEncoded() const;
  template <AccessMode mode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline void StoreEncoded(uint16_t bits, uint16_t mask);

#if defined(V8_TARGET_ARCH_64_BIT)
  // If the cage is enabled, the list of to-be-finalized objects is stored
  // inline in HeapObjectHeader to save on space required by sweeper metadata.
#if defined(CPPGC_CAGED_HEAP)
  uint32_t next_unfinalized_ = 0;
#else   // !defined(CPPGC_CAGED_HEAP)
  uint32_t padding_ = 0;
#endif  // !defined(CPPGC_CAGED_HEAP)
#endif  // defined(V8_TARGET_ARCH_64_BIT)
  uint16_t encoded_high_;
  uint16_t encoded_low_;
};

static_assert(kAllocationGranularity == sizeof(HeapObjectHeader),
              "sizeof(HeapObjectHeader) must match allocation granularity to "
              "guarantee alignment");

// static
HeapObjectHeader& HeapObjectHeader::FromObject(void* object) {
  return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(object) -
                                              sizeof(HeapObjectHeader));
}

// static
const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
  return *reinterpret_cast<const HeapObjectHeader*>(
      static_cast<ConstAddress>(object) - sizeof(HeapObjectHeader));
}

HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
  USE(padding_);
#endif  // defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
  DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
  DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
  DCHECK_GE(kMaxSize, size);
  encoded_low_ = EncodeSize(size);
  // Objects may get published to the marker without any other synchronization
  // (e.g., write barrier) in which case the in-construction bit is read
  // concurrently which requires reading encoded_high_ atomically. It is ok if
  // this write is not observed by the marker, since the sweeper sets the
  // in-construction bit to 0 and we can rely on that to guarantee a correct
  // answer when checking if objects are in-construction.
  v8::base::AsAtomicPtr(&encoded_high_)
      ->store(GCInfoIndexField::encode(gc_info_index),
              std::memory_order_relaxed);
  DCHECK(IsInConstruction());
#ifdef DEBUG
  CheckApiConstants();
#endif  // DEBUG
}
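
// Illustrative sketch (not from the original sources): a freshly constructed
// header reports its object as in construction until the bit is cleared
// through the API-side helper.
//
//   new (memory) HeapObjectHeader(size, gc_info_index);
//   auto& header = *reinterpret_cast<HeapObjectHeader*>(memory);
//   DCHECK(header.IsInConstruction());
//   header.MarkAsFullyConstructed();  // Forwards to the API helper
//                                     // MarkObjectAsFullyConstructed().
//   DCHECK(!header.IsInConstruction());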

Address HeapObjectHeader::ObjectStart() const {
  return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
         sizeof(HeapObjectHeader);
}

template <AccessMode mode>
Address HeapObjectHeader::ObjectEnd() const {
  DCHECK(!IsLargeObject());
  return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
         AllocatedSize<mode>();
}

template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return GCInfoIndexField::decode(encoded);
}

template <AccessMode mode>
size_t HeapObjectHeader::AllocatedSize() const {
  // Size is immutable after construction while either marking or sweeping is
  // running, so a relaxed load (if mode == kAtomic) is enough.
  uint16_t encoded_low_value =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  const size_t size = DecodeSize(encoded_low_value);
  return size;
}

void HeapObjectHeader::SetAllocatedSize(size_t size) {
#if !defined(CPPGC_YOUNG_GENERATION)
  // With sticky bits, marked objects correspond to old objects.
  // TODO(bikineev:1029379): Consider disallowing old/marked objects to be
  // resized.
  DCHECK(!IsMarked());
#endif
  // The object may be marked (i.e. old, in case young generation is enabled).
  // Make sure to not overwrite the mark bit.
  encoded_low_ &= ~SizeField::encode(SizeField::kMax);
  encoded_low_ |= EncodeSize(size);
}

template <AccessMode mode>
size_t HeapObjectHeader::ObjectSize() const {
  // The following DCHECK also fails for large objects.
  DCHECK_GT(AllocatedSize<mode>(), sizeof(HeapObjectHeader));
  return AllocatedSize<mode>() - sizeof(HeapObjectHeader);
}

template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
  return AllocatedSize<mode>() == kLargeObjectSizeInHeader;
}

template <AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return !FullyConstructedField::decode(encoded);
}

void HeapObjectHeader::MarkAsFullyConstructed() {
  MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
      ObjectStart());
}

template <AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  return MarkBitField::decode(encoded);
}

template <AccessMode mode>
void HeapObjectHeader::Unmark() {
  DCHECK(IsMarked<mode>());
  StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
      MarkBitField::encode(false), MarkBitField::kMask);
}

bool HeapObjectHeader::TryMarkAtomic() {
  auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
  uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
  const uint16_t new_value = old_value | MarkBitField::encode(true);
  if (new_value == old_value) {
    return false;
  }
  return atomic_encoded->compare_exchange_strong(old_value, new_value,
                                                 std::memory_order_relaxed);
}
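
// Illustrative sketch (not from the original sources): a concurrent marker
// typically claims an object via TryMarkAtomic(), which fails if the mark bit
// was already set, and only the winning thread traces the object. Real
// marking additionally handles in-construction objects specially.
//
//   if (header.TryMarkAtomic()) {
//     header.Trace<AccessMode::kAtomic>(visitor);
//   }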

void HeapObjectHeader::MarkNonAtomic() {
  DCHECK(!IsMarked<AccessMode::kNonAtomic>());
  encoded_low_ |= MarkBitField::encode(true);
}

template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
  return !IsMarked<mode>();
}

template <AccessMode mode>
bool HeapObjectHeader::IsFree() const {
  return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}

bool HeapObjectHeader::IsFinalizable() const {
  const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
  return gc_info.finalize;
}

#if defined(CPPGC_CAGED_HEAP)
void HeapObjectHeader::SetNextUnfinalized(HeapObjectHeader* next) {
  next_unfinalized_ = CagedHeap::OffsetFromAddress<uint32_t>(next);
}

HeapObjectHeader* HeapObjectHeader::GetNextUnfinalized(
    uintptr_t cage_base) const {
  DCHECK(cage_base);
  DCHECK_EQ(0u,
            CagedHeap::OffsetFromAddress(reinterpret_cast<void*>(cage_base)));
  return next_unfinalized_ ? reinterpret_cast<HeapObjectHeader*>(
                                 cage_base + next_unfinalized_)
                           : nullptr;
}
#endif  // defined(CPPGC_CAGED_HEAP)
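
// Illustrative sketch (hypothetical |head| pointer; caged-heap builds only):
// the to-be-finalized list is threaded through headers as 32-bit offsets from
// the cage base, so walking it only requires the base address.
//
//   for (HeapObjectHeader* h = head; h;) {
//     HeapObjectHeader* next = h->GetNextUnfinalized(cage_base);
//     h->Finalize();
//     h = next;
//   }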

template <AccessMode mode>
void HeapObjectHeader::Trace(Visitor* visitor) const {
  const GCInfo& gc_info =
      GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex<mode>());
  return gc_info.trace(visitor, ObjectStart());
}

template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
  const uint16_t& half =
      part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) return half;
  return v8::base::AsAtomicPtr(&half)->load(memory_order);
}

template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
  // Caveat: Not all changes to HeapObjectHeader's bitfields go through
  // StoreEncoded. The following have their own implementations and need to be
  // kept in sync:
  // - HeapObjectHeader::TryMarkAtomic
  // - MarkObjectAsFullyConstructed (API)
  DCHECK_EQ(0u, bits & ~mask);
  uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) {
    half = (half & ~mask) | bits;
    return;
  }
  // We don't perform a CAS loop here, assuming that no two pieces of info
  // sharing the same encoded half change at the same time.
  auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
  uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
  value = (value & ~mask) | bits;
  atomic_encoded->store(value, memory_order);
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_