// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_

#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/scavenger.h"
#include "src/objects/map.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots-inl.h"

namespace v8 {
namespace internal {
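
// Scavenger::PromotionList::View is a per-task handle on the shared
// promotion list; every operation is forwarded to the underlying list
// together with the view's task id.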
void Scavenger::PromotionList::View::PushRegularObject(HeapObject object,
                                                       int size) {
  promotion_list_->PushRegularObject(task_id_, object, size);
}

void Scavenger::PromotionList::View::PushLargeObject(HeapObject object, Map map,
                                                     int size) {
  promotion_list_->PushLargeObject(task_id_, object, map, size);
}

bool Scavenger::PromotionList::View::IsEmpty() {
  return promotion_list_->IsEmpty();
}

size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
  return promotion_list_->LocalPushSegmentSize(task_id_);
}

bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
  return promotion_list_->Pop(task_id_, entry);
}

void Scavenger::PromotionList::View::FlushToGlobal() {
  promotion_list_->FlushToGlobal(task_id_);
}

bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
  return promotion_list_->IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
  return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
}

void Scavenger::PromotionList::PushRegularObject(int task_id, HeapObject object,
                                                 int size) {
  regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
}

void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject object,
                                               Map map, int size) {
  large_object_promotion_list_.Push(task_id, {object, map, size});
}

bool Scavenger::PromotionList::IsEmpty() {
  return regular_object_promotion_list_.IsEmpty() &&
         large_object_promotion_list_.IsEmpty();
}

size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
  return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
         large_object_promotion_list_.LocalPushSegmentSize(task_id);
}
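
// Pops an entry from the regular object list first and re-reads the map from
// the popped object; falls back to the large object list, whose entries
// already carry the map.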
bool Scavenger::PromotionList::Pop(int task_id,
                                   struct PromotionListEntry* entry) {
  ObjectAndSize regular_object;
  if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
    entry->heap_object = regular_object.first;
    entry->size = regular_object.second;
    entry->map = entry->heap_object.map();
    return true;
  }
  return large_object_promotion_list_.Pop(task_id, entry);
}

void Scavenger::PromotionList::FlushToGlobal(int task_id) {
  regular_object_promotion_list_.FlushToGlobal(task_id);
  large_object_promotion_list_.FlushToGlobal(task_id);
}

size_t Scavenger::PromotionList::GlobalPoolSize() const {
  return regular_object_promotion_list_.GlobalPoolSize() +
         large_object_promotion_list_.GlobalPoolSize();
}

bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
  return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
         large_object_promotion_list_.IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
  // Threshold when to prioritize processing of the promotion list. Right
  // now we only look into the regular object list.
  const int kProcessPromotionListThreshold =
      kRegularObjectPromotionListSegmentSize / 2;
  return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
}

void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race
  // with page initialization.
  HeapObject heap_object;
  if (object->GetHeapObject(&heap_object)) {
    BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
  }
#endif
}
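
// Copies the contents of |source| to |target| and publishes the forwarding
// address with a release CAS on the source's map word. Returns false if
// another task installed a forwarding address first.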
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
                              int size) {
  // Copy the content of source to target.
  target.set_map_word(MapWord::FromMap(map));
  heap()->CopyBlock(target.address() + kTaggedSize,
                    source.address() + kTaggedSize, size - kTaggedSize);

  if (!source.release_compare_and_swap_map_word(
          MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
    // Other task migrated the object.
    return false;
  }

  if (V8_UNLIKELY(is_logging_)) {
    heap()->OnMoveEvent(target, source, size);
  }

  if (is_incremental_marking_) {
    heap()->incremental_marking()->TransferColor(source, target);
  }
  heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
  return true;
}
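
// Tries to copy |object| into new space. If the migration races with another
// task, the local allocation is handed back and the winner's forwarding
// address is used instead.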
template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(heap()->AllowedToBeMigrated(map, object, NEW_SPACE));
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation = allocator_.Allocate(
      NEW_SPACE, object_size, AllocationOrigin::kGC, alignment);

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(NEW_SPACE, target, object_size);
      MapWord map_word = object.synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (object_fields == ObjectFields::kMaybePointers) {
      copied_list_.Push(ObjectAndSize(target, object_size));
    }
    copied_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}
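
// Like SemiSpaceCopyObject, but copies |object| into old space and, if it may
// contain pointers, pushes it onto the promotion list for later iteration.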
template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
                                              HeapObject object,
                                              int object_size,
                                              ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation = allocator_.Allocate(
      OLD_SPACE, object_size, AllocationOrigin::kGC, alignment);

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(OLD_SPACE, target, object_size);
      MapWord map_word = object.synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (object_fields == ObjectFields::kMaybePointers) {
      promotion_list_.PushRegularObject(target, object_size);
    }
    promoted_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}
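
// A slot only needs to stay in the remembered set if the object it points to
// ended up in the young generation.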
SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
    CopyAndForwardResult result) {
  DCHECK_NE(CopyAndForwardResult::FAILURE, result);
  return result == CopyAndForwardResult::SUCCESS_YOUNG_GENERATION ? KEEP_SLOT
                                                                  : REMOVE_SLOT;
}
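
// Young large objects are not copied: they are promoted in place by
// installing a forwarding address that points to the object itself and
// recording them in surviving_new_large_objects_.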
bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
                                  ObjectFields object_fields) {
  // TODO(hpayer): Make this check size based, i.e.
  // object_size > kMaxRegularHeapObjectSize
  if (V8_UNLIKELY(
          FLAG_young_generation_large_objects &&
          BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
    DCHECK_EQ(NEW_LO_SPACE,
              MemoryChunk::FromHeapObject(object)->owner_identity());
    if (object.release_compare_and_swap_map_word(
            MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
      surviving_new_large_objects_.insert({object, map});
      promoted_size_ += object_size;
      if (object_fields == ObjectFields::kMaybePointers) {
        promotion_list_.PushLargeObject(object, map, object_size);
      }
    }
    return true;
  }
  return false;
}
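
// Default evacuation order: semi-space copy (skipped if the object should be
// promoted), then promotion to old space, then a last-resort semi-space copy.
// Failing all three is a fatal out-of-memory condition.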
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateObjectDefault(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(object.SizeFromMap(map) == object_size);
  CopyAndForwardResult result;

  if (HandleLargeObject(map, object, object_size, object_fields)) {
    return KEEP_SLOT;
  }

  SLOW_DCHECK(static_cast<size_t>(object_size) <=
              MemoryChunkLayout::AllocatableMemoryInDataPage());

  if (!heap()->ShouldBePromoted(object.address())) {
    // A semi-space copy may fail due to fragmentation. In that case, we
    // try to promote the object.
    result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
    if (result != CopyAndForwardResult::FAILURE) {
      return RememberedSetEntryNeeded(result);
    }
  }

  // We may want to promote this object if the object was already semi-space
  // copied in a previous young generation GC or if the semi-space copy above
  // failed.
  result = PromoteObject(map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  // If promotion failed, we try to copy the object to the other semi-space.
  result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
  UNREACHABLE();
}
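
// ThinStrings are shortcut directly to their actual (internalized) string
// when incremental marking is off; otherwise they are evacuated like any
// other object.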
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
                                                 ThinString object,
                                                 int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  if (!is_incremental_marking_) {
    // The ThinString should die after Scavenge, so avoid writing the proper
    // forwarding pointer and instead just signal the actual object as the
    // forwarded reference.
    String actual = object.actual();
    // ThinStrings always refer to internalized strings, which are always in
    // old space.
    DCHECK(!Heap::InYoungGeneration(actual));
    HeapObjectReference::Update(slot, actual);
    return REMOVE_SLOT;
  }

  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map.visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}
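
// When incremental marking is off, cons strings whose second part is the
// empty string are shortcut to their first part instead of being copied as a
// whole.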
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
                                                        THeapObjectSlot slot,
                                                        ConsString object,
                                                        int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(IsShortcutCandidate(map.instance_type()));
  if (!is_incremental_marking_ &&
      object.unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
    HeapObject first = HeapObject::cast(object.unchecked_first());

    HeapObjectReference::Update(slot, first);

    if (!Heap::InYoungGeneration(first)) {
      object.synchronized_set_map_word(MapWord::FromForwardingAddress(first));
      return REMOVE_SLOT;
    }

    MapWord first_word = first.synchronized_map_word();
    if (first_word.IsForwardingAddress()) {
      HeapObject target = first_word.ToForwardingAddress();

      HeapObjectReference::Update(slot, target);
      object.synchronized_set_map_word(MapWord::FromForwardingAddress(target));
      return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
    }
    Map map = first_word.ToMap();
    SlotCallbackResult result =
        EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
                              Map::ObjectFieldsFrom(map.visitor_id()));
    object.synchronized_set_map_word(
        MapWord::FromForwardingAddress(slot.ToHeapObject()));
    return result;
  }
  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map.visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}
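
// Dispatches evacuation based on the visitor id of |map|: thin strings and
// shortcut candidates get special handling, everything else takes the default
// path.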
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
                                             HeapObject source) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(Heap::InFromPage(source));
  SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
  int size = source.SizeFromMap(map);
  // Cannot use ::cast() below because that would add checks in debug mode
  // that require re-reading the map.
  VisitorId visitor_id = map.visitor_id();
  switch (visitor_id) {
    case kVisitThinString:
      // At the moment we don't allow weak pointers to thin strings.
      DCHECK(!(*slot)->IsWeak());
      return EvacuateThinString(map, slot, ThinString::unchecked_cast(source),
                                size);
    case kVisitShortcutCandidate:
      DCHECK(!(*slot)->IsWeak());
      // At the moment we don't allow weak pointers to cons strings.
      return EvacuateShortcutCandidate(
          map, slot, ConsString::unchecked_cast(source), size);
    default:
      return EvacuateObjectDefault(map, slot, source, size,
                                   Map::ObjectFieldsFrom(visitor_id));
  }
}
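
// Scavenges a single object reachable through |p|: if the object has already
// been copied, only the slot is updated; otherwise the object is evacuated.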
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
                                             HeapObject object) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(Heap::InFromPage(object));

  // Synchronized load that consumes the publishing CAS of MigrateObject.
  MapWord first_word = object.synchronized_map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    HeapObject dest = first_word.ToForwardingAddress();
    HeapObjectReference::Update(p, dest);
    DCHECK_IMPLIES(Heap::InYoungGeneration(dest),
                   Heap::InToPage(dest) || Heap::IsLargeObject(dest));

    return Heap::InYoungGeneration(dest) ? KEEP_SLOT : REMOVE_SLOT;
  }

  Map map = first_word.ToMap();
  // AllocationMementos are unrooted and shouldn't survive a scavenge.
  DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
  // Call the slow part of scavenge object.
  return EvacuateObject(p, map, object);
}
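
// Scavenges the object referenced by |slot| if it still lives in from-space
// and reports whether the slot has to be kept in the remembered set.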
template <typename TSlot>
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap, TSlot slot) {
  static_assert(
      std::is_same<TSlot, FullMaybeObjectSlot>::value ||
          std::is_same<TSlot, MaybeObjectSlot>::value,
      "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
  using THeapObjectSlot = typename TSlot::THeapObjectSlot;
  MaybeObject object = *slot;
  if (Heap::InFromPage(object)) {
    HeapObject heap_object = object->GetHeapObject();

    SlotCallbackResult result =
        ScavengeObject(THeapObjectSlot(slot), heap_object);
    DCHECK_IMPLIES(result == REMOVE_SLOT,
                   !heap->InYoungGeneration((*slot)->GetHeapObject()));
    return result;
  } else if (Heap::InToPage(object)) {
    // Already updated slot. This can happen when processing of the work list
    // is interleaved with processing roots.
    return KEEP_SLOT;
  }
  // Slots can point to "to" space if the slot has been recorded multiple
  // times in the remembered set. We remove the redundant slot now.
  return REMOVE_SLOT;
}

void ScavengeVisitor::VisitPointers(HeapObject host, ObjectSlot start,
                                    ObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitPointers(HeapObject host, MaybeObjectSlot start,
                                    MaybeObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
  Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
#ifdef DEBUG
  Code old_target = target;
#endif
  FullObjectSlot slot(&target);
  VisitHeapObjectImpl(slot, target);
  // Code objects are never in new-space, so the slot contents must not change.
  DCHECK_EQ(old_target, target);
}

void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
  HeapObject heap_object = rinfo->target_object();
#ifdef DEBUG
  HeapObject old_heap_object = heap_object;
#endif
  FullObjectSlot slot(&heap_object);
  VisitHeapObjectImpl(slot, heap_object);
  // We don't embed new-space objects into code, so the slot contents must not
  // change.
  DCHECK_EQ(old_heap_object, heap_object);
}
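
// Only young-generation objects need to be scavenged; pointers into the old
// generation are left untouched.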
template <typename TSlot>
void ScavengeVisitor::VisitHeapObjectImpl(TSlot slot, HeapObject heap_object) {
  if (Heap::InYoungGeneration(heap_object)) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    scavenger_->ScavengeObject(THeapObjectSlot(slot), heap_object);
  }
}

template <typename TSlot>
void ScavengeVisitor::VisitPointersImpl(HeapObject host, TSlot start,
                                        TSlot end) {
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object = *slot;
    HeapObject heap_object;
    // Treat weak references as strong.
    if (object.GetHeapObject(&heap_object)) {
      VisitHeapObjectImpl(slot, heap_object);
    }
  }
}
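
// Marks the buffer's extension (YoungMarkExtension) in addition to visiting
// its regular fields.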
int ScavengeVisitor::VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
  object.YoungMarkExtension();
  int size = JSArrayBuffer::BodyDescriptor::SizeOf(map, object);
  JSArrayBuffer::BodyDescriptor::IterateBody(map, object, size, this);
  return size;
}

int ScavengeVisitor::VisitEphemeronHashTable(Map map,
                                             EphemeronHashTable table) {
  // Register the table with the scavenger, so it can take care of the weak
  // keys later. This allows iterating only the table's values, which are
  // treated as strong independently of whether the key is live.
  scavenger_->AddEphemeronHashTable(table);
  for (InternalIndex i : table.IterateEntries()) {
    ObjectSlot value_slot =
        table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
    VisitPointer(table, value_slot);
  }

  return table.SizeFromMap(map);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SCAVENGER_INL_H_