// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_

#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/scavenger.h"
#include "src/objects/map.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots-inl.h"

namespace v8 {
namespace internal {

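// The scavenger maintains two thread-local promotion worklists: one for
// regular-sized objects (object and size) and one for objects in the new
// large object space, whose entries additionally carry the Map.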
void Scavenger::PromotionList::Local::PushRegularObject(HeapObject object,
                                                        int size) {
  regular_object_promotion_list_local_.Push({object, size});
}

void Scavenger::PromotionList::Local::PushLargeObject(HeapObject object,
                                                      Map map, int size) {
  large_object_promotion_list_local_.Push({object, map, size});
}

size_t Scavenger::PromotionList::Local::LocalPushSegmentSize() const {
  return regular_object_promotion_list_local_.PushSegmentSize() +
         large_object_promotion_list_local_.PushSegmentSize();
}

bool Scavenger::PromotionList::Local::Pop(struct PromotionListEntry* entry) {
  ObjectAndSize regular_object;
  if (regular_object_promotion_list_local_.Pop(&regular_object)) {
    entry->heap_object = regular_object.first;
    entry->size = regular_object.second;
    entry->map = entry->heap_object.map();
    return true;
  }
  return large_object_promotion_list_local_.Pop(entry);
}

void Scavenger::PromotionList::Local::Publish() {
  regular_object_promotion_list_local_.Publish();
  large_object_promotion_list_local_.Publish();
}

bool Scavenger::PromotionList::Local::IsGlobalPoolEmpty() const {
  return regular_object_promotion_list_local_.IsGlobalEmpty() &&
         large_object_promotion_list_local_.IsGlobalEmpty();
}

bool Scavenger::PromotionList::Local::ShouldEagerlyProcessPromotionList()
    const {
  // Threshold at which to prioritize processing of the promotion list. Right
  // now we only look at the regular object list.
  const int kProcessPromotionListThreshold =
      kRegularObjectPromotionListSegmentSize / 2;
  return LocalPushSegmentSize() < kProcessPromotionListThreshold;
}

bool Scavenger::PromotionList::IsEmpty() const {
  return regular_object_promotion_list_.IsEmpty() &&
         large_object_promotion_list_.IsEmpty();
}

size_t Scavenger::PromotionList::Size() const {
  return regular_object_promotion_list_.Size() +
         large_object_promotion_list_.Size();
}

void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race
  // with page initialization.
  HeapObject heap_object;
  if (object->GetHeapObject(&heap_object)) {
    BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
  }
#endif
}

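// Copies the contents of |source| into the freshly allocated |target| and
// installs a forwarding pointer in |source|'s map word with a release
// compare-and-swap. Returns false if another task won the race and migrated
// the object first; callers then free |target| again and use the forwarding
// address installed by the winner.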
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
                              int size,
                              PromotionHeapChoice promotion_heap_choice) {
  // Copy the content of source to target.
  target.set_map_word(MapWord::FromMap(map), kRelaxedStore);
  heap()->CopyBlock(target.address() + kTaggedSize,
                    source.address() + kTaggedSize, size - kTaggedSize);

  // This release CAS is paired with the load acquire in ScavengeObject.
  if (!source.release_compare_and_swap_map_word(
          MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
    // Another task already migrated the object.
    return false;
  }

  if (V8_UNLIKELY(is_logging_)) {
    heap()->OnMoveEvent(target, source, size);
  }

  if (is_incremental_marking_ &&
      promotion_heap_choice != kPromoteIntoSharedHeap) {
    heap()->incremental_marking()->TransferColor(source, target);
  }
  heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
  return true;
}

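// Tries to copy |object| within the young generation (semi-space copy). On
// success the slot is updated to point at the copy and, if the object may
// contain pointers, the copy is pushed onto the local copied list for later
// body iteration. Returns FAILURE if the NEW_SPACE allocation does not
// succeed, e.g. due to fragmentation.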
template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(heap()->AllowedToBeMigrated(map, object, NEW_SPACE));
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation = allocator_.Allocate(
      NEW_SPACE, object_size, AllocationOrigin::kGC, alignment);

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success =
        MigrateObject(map, object, target, object_size, kPromoteIntoLocalHeap);
    if (!self_success) {
      allocator_.FreeLast(NEW_SPACE, target, object_size);
      MapWord map_word = object.map_word(kAcquireLoad);
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (object_fields == ObjectFields::kMaybePointers) {
      copied_list_local_.Push(ObjectAndSize(target, object_size));
    }
    copied_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

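// Tries to promote |object| out of the young generation, allocating the copy
// either in the local old space or in the shared heap depending on
// |promotion_heap_choice|. Promoted objects that may contain pointers (or any
// object while compacting the map space) are pushed onto the promotion list.
// Returns FAILURE if the old-generation allocation fails.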
template <typename THeapObjectSlot,
          Scavenger::PromotionHeapChoice promotion_heap_choice>
CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
                                              HeapObject object,
                                              int object_size,
                                              ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK_GE(object_size, Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation;
  switch (promotion_heap_choice) {
    case kPromoteIntoLocalHeap:
      allocation = allocator_.Allocate(OLD_SPACE, object_size,
                                       AllocationOrigin::kGC, alignment);
      break;
    case kPromoteIntoSharedHeap:
      DCHECK_NOT_NULL(shared_old_allocator_);
      allocation = shared_old_allocator_->AllocateRaw(object_size, alignment,
                                                      AllocationOrigin::kGC);
      break;
  }

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success =
        MigrateObject(map, object, target, object_size, promotion_heap_choice);
    if (!self_success) {
      allocator_.FreeLast(OLD_SPACE, target, object_size);
      MapWord map_word = object.map_word(kAcquireLoad);
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);

    // During incremental marking we want to push every object in order to
    // record slots for map words. Necessary for map space compaction.
    if (object_fields == ObjectFields::kMaybePointers ||
        is_compacting_including_map_space_) {
      promotion_list_local_.PushRegularObject(target, object_size);
    }
    promoted_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

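// Maps a successful copy result to the remembered-set action for the slot:
// slots that now point into the young generation are kept, slots that point
// into the old generation are removed.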
SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
    CopyAndForwardResult result) {
  DCHECK_NE(CopyAndForwardResult::FAILURE, result);
  return result == CopyAndForwardResult::SUCCESS_YOUNG_GENERATION ? KEEP_SLOT
                                                                  : REMOVE_SLOT;
}

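// Objects in the new large object space are not copied. Instead they survive
// in place: the map word is forwarded to the object itself, the object is
// recorded in surviving_new_large_objects_, and, if it may contain pointers,
// it is pushed onto the large object promotion list. Returns false if the
// object is not a new-space large object.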
bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
                                  ObjectFields object_fields) {
  // TODO(hpayer): Make this check size based, i.e.
  // object_size > kMaxRegularHeapObjectSize
  if (V8_UNLIKELY(
          BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
    DCHECK_EQ(NEW_LO_SPACE,
              MemoryChunk::FromHeapObject(object)->owner_identity());
    if (object.release_compare_and_swap_map_word(
            MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
      surviving_new_large_objects_.insert({object, map});
      promoted_size_ += object_size;
      if (object_fields == ObjectFields::kMaybePointers) {
        promotion_list_local_.PushLargeObject(object, map, object_size);
      }
    }
    return true;
  }
  return false;
}

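// Default evacuation path: new large objects are handled in place; other
// objects are semi-space copied unless they should already be promoted, in
// which case promotion is attempted first and the semi-space copy serves as a
// fallback. If neither succeeds, the scavenger reports a fatal out-of-memory
// condition.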
template <typename THeapObjectSlot,
          Scavenger::PromotionHeapChoice promotion_heap_choice>
SlotCallbackResult Scavenger::EvacuateObjectDefault(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(object.SizeFromMap(map) == object_size);
  CopyAndForwardResult result;

  if (HandleLargeObject(map, object, object_size, object_fields)) {
    return KEEP_SLOT;
  }

  SLOW_DCHECK(static_cast<size_t>(object_size) <=
              MemoryChunkLayout::AllocatableMemoryInDataPage());

  if (!heap()->ShouldBePromoted(object.address())) {
    // A semi-space copy may fail due to fragmentation. In that case, we
    // try to promote the object.
    result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
    if (result != CopyAndForwardResult::FAILURE) {
      return RememberedSetEntryNeeded(result);
    }
  }

  // We may want to promote this object if the object was already semi-space
  // copied in a previous young generation GC or if the semi-space copy above
  // failed.
  result = PromoteObject<THeapObjectSlot, promotion_heap_choice>(
      map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  // If promotion failed, we try to copy the object to the other semi-space.
  result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
  UNREACHABLE();
}

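// ThinStrings always point to an internalized string in the old generation.
// Outside of incremental marking the ThinString itself is expected to die
// after the scavenge, so the slot is forwarded directly to the actual string;
// during incremental marking the ThinString is evacuated like any other
// object.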
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
                                                 ThinString object,
                                                 int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  if (!is_incremental_marking_) {
    // The ThinString should die after Scavenge, so avoid writing the proper
    // forwarding pointer and instead just signal the actual object as
    // forwarded reference.
    String actual = object.actual();
    // ThinStrings always refer to internalized strings, which are always in
    // old space.
    DCHECK(!Heap::InYoungGeneration(actual));
    HeapObjectReference::Update(slot, actual);
    return REMOVE_SLOT;
  }

  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map.visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}

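// Cons strings whose second part is the empty string are shortcut candidates:
// outside of incremental marking the slot is redirected straight to the first
// part, bypassing the ConsString wrapper. Otherwise the ConsString is
// evacuated like a regular object.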
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
                                                        THeapObjectSlot slot,
                                                        ConsString object,
                                                        int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(IsShortcutCandidate(map.instance_type()));
  if (!is_incremental_marking_ &&
      object.unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
    HeapObject first = HeapObject::cast(object.unchecked_first());

    HeapObjectReference::Update(slot, first);

    if (!Heap::InYoungGeneration(first)) {
      object.set_map_word(MapWord::FromForwardingAddress(first), kReleaseStore);
      return REMOVE_SLOT;
    }

    MapWord first_word = first.map_word(kAcquireLoad);
    if (first_word.IsForwardingAddress()) {
      HeapObject target = first_word.ToForwardingAddress();

      HeapObjectReference::Update(slot, target);
      object.set_map_word(MapWord::FromForwardingAddress(target),
                          kReleaseStore);
      return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
    }
    Map first_map = first_word.ToMap();
    SlotCallbackResult result = EvacuateObjectDefault(
        first_map, slot, first, first.SizeFromMap(first_map),
        Map::ObjectFieldsFrom(first_map.visitor_id()));
    object.set_map_word(MapWord::FromForwardingAddress(slot.ToHeapObject()),
                        kReleaseStore);
    return result;
  }
  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map.visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}

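// In-place internalizable strings are promoted into the shared heap when the
// shared string table is enabled; otherwise they take the default evacuation
// path.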
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateInPlaceInternalizableString(
    Map map, THeapObjectSlot slot, String object, int object_size,
    ObjectFields object_fields) {
  DCHECK(String::IsInPlaceInternalizable(map.instance_type()));
  DCHECK_EQ(object_fields, Map::ObjectFieldsFrom(map.visitor_id()));
  if (shared_string_table_) {
    return EvacuateObjectDefault<THeapObjectSlot, kPromoteIntoSharedHeap>(
        map, slot, object, object_size, object_fields);
  }
  return EvacuateObjectDefault(map, slot, object, object_size, object_fields);
}

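// Dispatches evacuation based on the visitor id recorded in the map: thin
// strings, shortcut candidates, and in-place internalizable strings get
// specialized handling, everything else goes through EvacuateObjectDefault.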
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
                                             HeapObject source) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(Heap::InFromPage(source));
  SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
  int size = source.SizeFromMap(map);
  // Cannot use ::cast() below because that would add checks in debug mode
  // that require re-reading the map.
  VisitorId visitor_id = map.visitor_id();
  switch (visitor_id) {
    case kVisitThinString:
      // At the moment we don't allow weak pointers to thin strings.
      DCHECK(!(*slot)->IsWeak());
      return EvacuateThinString(map, slot, ThinString::unchecked_cast(source),
                                size);
    case kVisitShortcutCandidate:
      DCHECK(!(*slot)->IsWeak());
      // At the moment we don't allow weak pointers to cons strings.
      return EvacuateShortcutCandidate(
          map, slot, ConsString::unchecked_cast(source), size);
    case kVisitSeqOneByteString:
    case kVisitSeqTwoByteString:
      DCHECK(String::IsInPlaceInternalizable(map.instance_type()));
      return EvacuateInPlaceInternalizableString(
          map, slot, String::unchecked_cast(source), size,
          ObjectFields::kMaybePointers);
    case kVisitDataObject:  // External strings have kVisitDataObject.
      if (String::IsInPlaceInternalizableExcludingExternal(
              map.instance_type())) {
        return EvacuateInPlaceInternalizableString(
            map, slot, String::unchecked_cast(source), size,
            ObjectFields::kDataOnly);
      }
      V8_FALLTHROUGH;
    default:
      return EvacuateObjectDefault(map, slot, source, size,
                                   Map::ObjectFieldsFrom(visitor_id));
  }
}

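// Scavenges a single object referenced from slot |p|. If the object has
// already been forwarded, only the slot is updated; otherwise the object is
// evacuated. The acquire load of the map word below pairs with the release
// CAS in MigrateObject.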
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
                                             HeapObject object) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(Heap::InFromPage(object));

  // Synchronized load that consumes the publishing CAS of MigrateObject. We
  // need memory ordering in order to read the page header of the forwarded
  // object (using Heap::InYoungGeneration).
  MapWord first_word = object.map_word(kAcquireLoad);

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    HeapObject dest = first_word.ToForwardingAddress();
    HeapObjectReference::Update(p, dest);
    DCHECK_IMPLIES(Heap::InYoungGeneration(dest),
                   Heap::InToPage(dest) || Heap::IsLargeObject(dest));

    // This load forces us to have memory ordering for the map load above. We
    // need to have the page header properly initialized.
    return Heap::InYoungGeneration(dest) ? KEEP_SLOT : REMOVE_SLOT;
  }

  Map map = first_word.ToMap();
  // AllocationMementos are unrooted and shouldn't survive a scavenge.
  DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
  // Call the slow part of scavenge object.
  return EvacuateObject(p, map, object);
}

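// Used when iterating remembered-set slots: slots that still point into a
// from-page are scavenged, slots that already point into a to-page were
// updated earlier (e.g. by interleaved root processing) and are kept, and
// anything else is a redundant entry that can be removed.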
template <typename TSlot>
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap, TSlot slot) {
  static_assert(
      std::is_same<TSlot, FullMaybeObjectSlot>::value ||
          std::is_same<TSlot, MaybeObjectSlot>::value,
      "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
  using THeapObjectSlot = typename TSlot::THeapObjectSlot;
  MaybeObject object = *slot;
  if (Heap::InFromPage(object)) {
    HeapObject heap_object = object->GetHeapObject();

    SlotCallbackResult result =
        ScavengeObject(THeapObjectSlot(slot), heap_object);
    DCHECK_IMPLIES(result == REMOVE_SLOT,
                   !heap->InYoungGeneration((*slot)->GetHeapObject()));
    return result;
  } else if (Heap::InToPage(object)) {
    // Already updated slot. This can happen when processing of the work list
    // is interleaved with processing roots.
    return KEEP_SLOT;
  }
  // Slots can point to "to" space if the slot has been recorded multiple
  // times in the remembered set. We remove the redundant slot now.
  return REMOVE_SLOT;
}

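// ScavengeVisitor visits the body of an object and feeds every reference that
// still points into the young generation back into the scavenger; references
// to old-generation objects are left untouched.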
void ScavengeVisitor::VisitPointers(HeapObject host, ObjectSlot start,
                                    ObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitPointers(HeapObject host, MaybeObjectSlot start,
                                    MaybeObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitCodePointer(HeapObject host, CodeObjectSlot slot) {
  CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
  // Code slots never appear in new space because CodeDataContainers, the
  // only objects that can contain code pointers, are always allocated in
  // the old space.
  UNREACHABLE();
}

void ScavengeVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
  Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
#ifdef DEBUG
  Code old_target = target;
#endif
  FullObjectSlot slot(&target);
  VisitHeapObjectImpl(slot, target);
  // Code objects are never in new-space, so the slot contents must not change.
  DCHECK_EQ(old_target, target);
}

void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
  HeapObject heap_object = rinfo->target_object(cage_base());
#ifdef DEBUG
  HeapObject old_heap_object = heap_object;
#endif
  FullObjectSlot slot(&heap_object);
  VisitHeapObjectImpl(slot, heap_object);
  // We don't embed new-space objects into code, so the slot contents must not
  // change.
  DCHECK_EQ(old_heap_object, heap_object);
}

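// Shared helpers: VisitHeapObjectImpl scavenges a single young-generation
// reference, VisitPointersImpl applies it to a range of slots and treats weak
// references as strong.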
template <typename TSlot>
void ScavengeVisitor::VisitHeapObjectImpl(TSlot slot, HeapObject heap_object) {
  if (Heap::InYoungGeneration(heap_object)) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    scavenger_->ScavengeObject(THeapObjectSlot(slot), heap_object);
  }
}

template <typename TSlot>
void ScavengeVisitor::VisitPointersImpl(HeapObject host, TSlot start,
                                        TSlot end) {
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object = *slot;
    HeapObject heap_object;
    // Treat weak references as strong.
    if (object.GetHeapObject(&heap_object)) {
      VisitHeapObjectImpl(slot, heap_object);
    }
  }
}

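// JSArrayBuffers mark their backing-store extension as young before the body
// is visited as usual; this signals that the extension is still reachable
// from the young generation.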
int ScavengeVisitor::VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
  object.YoungMarkExtension();
  int size = JSArrayBuffer::BodyDescriptor::SizeOf(map, object);
  JSArrayBuffer::BodyDescriptor::IterateBody(map, object, size, this);
  return size;
}

int ScavengeVisitor::VisitEphemeronHashTable(Map map,
                                             EphemeronHashTable table) {
  // Register the table with the scavenger, so it can take care of the weak
  // keys later. This allows us to only iterate the tables' values, which are
  // treated as strong independently of whether the key is live.
  scavenger_->AddEphemeronHashTable(table);
  for (InternalIndex i : table.IterateEntries()) {
    ObjectSlot value_slot =
        table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
    VisitPointer(table, value_slot);
  }

  return table.SizeFromMap(map);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SCAVENGER_INL_H_