1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/scavenger.h"
6
7 #include "src/heap/array-buffer-sweeper.h"
8 #include "src/heap/barrier.h"
9 #include "src/heap/gc-tracer.h"
10 #include "src/heap/heap-inl.h"
11 #include "src/heap/invalidated-slots-inl.h"
12 #include "src/heap/item-parallel-job.h"
13 #include "src/heap/mark-compact-inl.h"
14 #include "src/heap/memory-chunk-inl.h"
15 #include "src/heap/objects-visiting-inl.h"
16 #include "src/heap/remembered-set-inl.h"
17 #include "src/heap/scavenger-inl.h"
18 #include "src/heap/sweeper.h"
19 #include "src/objects/data-handler-inl.h"
20 #include "src/objects/embedder-data-array-inl.h"
21 #include "src/objects/objects-body-descriptors-inl.h"
22 #include "src/objects/transitions-inl.h"
23 #include "src/utils/utils-inl.h"
24
25 namespace v8 {
26 namespace internal {
27
// Visitor used to iterate the body of an object that was just promoted to the
// old generation. It scavenges any referenced from-space objects and inserts
// slots into the appropriate remembered sets (old-to-new, or old-to-old for
// evacuation candidates when |record_slots| is set).
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
 public:
  // |record_slots| is true when slots pointing at evacuation candidates must
  // be recorded (set by the caller when compaction is in progress and the
  // host object is black — see IterateAndScavengePromotedObject).
  IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
                                           bool record_slots)
      : scavenger_(scavenger), record_slots_(record_slots) {}

  V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  // Code targets and embedded pointers are decoded from the RelocInfo and
  // handled through a full (uncompressed) slot pointing at a stack local.
  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    HandleSlot(host, FullHeapObjectSlot(&target), target);
  }
  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    HeapObject heap_object = rinfo->target_object();
    HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
  }

  inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
                             ObjectSlot value) override {
    DCHECK(Heap::IsLargeObject(obj) || obj.IsEphemeronHashTable());
    // The value is always visited; the key is only treated as strong when it
    // is not in the young generation.
    VisitPointer(obj, value);

    if (ObjectInYoungGeneration(*key)) {
      // We cannot check the map here, as it might be a large object.
      scavenger_->RememberPromotedEphemeron(
          EphemeronHashTable::unchecked_cast(obj), entry);
    } else {
      VisitPointer(obj, key);
    }
  }

 private:
  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    // Treat weak references as strong.
    // TODO(marja): Proper weakness handling in the young generation.
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = *slot;
      HeapObject heap_object;
      if (object.GetHeapObject(&heap_object)) {
        HandleSlot(host, THeapObjectSlot(slot), heap_object);
      }
    }
  }

  // Scavenges |target| if it still lives in from-space and records |slot| in
  // the relevant remembered set when needed.
  template <typename THeapObjectSlot>
  V8_INLINE void HandleSlot(HeapObject host, THeapObjectSlot slot,
                            HeapObject target) {
    static_assert(
        std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
            std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
        "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
    scavenger_->PageMemoryFence(MaybeObject::FromObject(target));

    if (Heap::InFromPage(target)) {
      SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
      // Re-read the slot: ScavengeObject updates it to the forwarded object.
      bool success = (*slot)->GetHeapObject(&target);
      USE(success);
      DCHECK(success);

      if (result == KEEP_SLOT) {
        SLOW_DCHECK(target.IsHeapObject());
        MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);

        // Sweeper is stopped during scavenge, so we can directly
        // insert into its remembered set here.
        if (chunk->sweeping_slot_set()) {
          RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
                                                            slot.address());
        } else {
          RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
                                                                slot.address());
        }
      }
      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
          HeapObject::cast(target)));
    } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
                                    HeapObject::cast(target))) {
      // We should never try to record off-heap slots.
      DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
      // We cannot call MarkCompactCollector::RecordSlot because that checks
      // that the host page is not in young generation, which does not hold
      // for pending large pages.
      RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
          MemoryChunk::FromHeapObject(host), slot.address());
    }
  }

  Scavenger* const scavenger_;
  const bool record_slots_;
};
128
129 namespace {
130
IsUnscavengedHeapObject(Heap * heap,Object object)131 V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
132 return Heap::InFromPage(object) &&
133 !HeapObject::cast(object).map_word().IsForwardingAddress();
134 }
135
136 // Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
IsUnscavengedHeapObject(Heap * heap,HeapObject heap_object)137 V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
138 return Heap::InFromPage(heap_object) &&
139 !heap_object.map_word().IsForwardingAddress();
140 }
141
// Slot-based adapter: dereferences |p| and delegates to the Object overload.
// Used as a callback for global-handle processing below.
bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
  return IsUnscavengedHeapObject(heap, *p);
}
145
146 } // namespace
147
148 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
149 public:
RetainAs(Object object)150 Object RetainAs(Object object) override {
151 if (!Heap::InFromPage(object)) {
152 return object;
153 }
154
155 MapWord map_word = HeapObject::cast(object).map_word();
156 if (map_word.IsForwardingAddress()) {
157 return map_word.ToForwardingAddress();
158 }
159 return Object();
160 }
161 };
162
// Parallel job that scavenges remembered-set pages and drains the copied and
// promotion work lists. Note: |memory_chunks_| is initialized (moved) before
// |remaining_memory_chunks_| and |generator_| read its size; this relies on
// member declaration order matching this initializer order.
ScavengerCollector::JobTask::JobTask(
    ScavengerCollector* outer,
    std::vector<std::unique_ptr<Scavenger>>* scavengers,
    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
    Scavenger::CopiedList* copied_list,
    Scavenger::PromotionList* promotion_list)
    : outer_(outer),
      scavengers_(scavengers),
      memory_chunks_(std::move(memory_chunks)),
      remaining_memory_chunks_(memory_chunks_.size()),
      generator_(memory_chunks_.size()),
      copied_list_(copied_list),
      promotion_list_(promotion_list) {}
176
// Entry point for each worker (and the joining main thread). Picks the
// per-task Scavenger by task id and processes items under the appropriate
// tracing scope (foreground vs. background).
void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
  DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
  Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
  if (delegate->IsJoiningThread()) {
    TRACE_GC(outer_->heap_->tracer(),
             GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
    ProcessItems(delegate, scavenger);
  } else {
    TRACE_BACKGROUND_GC(
        outer_->heap_->tracer(),
        GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
    ProcessItems(delegate, scavenger);
  }
}
191
GetMaxConcurrency(size_t worker_count) const192 size_t ScavengerCollector::JobTask::GetMaxConcurrency(
193 size_t worker_count) const {
194 // We need to account for local segments held by worker_count in addition to
195 // GlobalPoolSize() of copied_list_ and promotion_list_.
196 return std::min<size_t>(
197 scavengers_->size(),
198 std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
199 worker_count + copied_list_->GlobalPoolSize() +
200 promotion_list_->GlobalPoolSize()));
201 }
202
// Scavenges remembered-set pages, then drains this task's work lists, timing
// the whole phase and optionally tracing per-task statistics.
void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
                                               Scavenger* scavenger) {
  double scavenging_time = 0.0;
  {
    TimedScope scope(&scavenging_time);
    ConcurrentScavengePages(scavenger);
    scavenger->Process(delegate);
  }
  if (FLAG_trace_parallel_scavenge) {
    PrintIsolate(outer_->heap_->isolate(),
                 "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
                 static_cast<void*>(this), scavenging_time,
                 scavenger->bytes_copied(), scavenger->bytes_promoted());
  }
}
218
// Claims and scavenges memory chunks until none remain. Workers start at an
// index handed out by |generator_| and walk forward, claiming chunks via
// TryAcquire(); hitting an already-claimed chunk restarts with a new index.
void ScavengerCollector::JobTask::ConcurrentScavengePages(
    Scavenger* scavenger) {
  while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
    base::Optional<size_t> index = generator_.GetNext();
    if (!index) return;
    for (size_t i = *index; i < memory_chunks_.size(); ++i) {
      auto& work_item = memory_chunks_[i];
      // Chunk already claimed by another worker; get a fresh starting index.
      if (!work_item.first.TryAcquire()) break;
      scavenger->ScavengePage(work_item.second);
      // This worker processed the last outstanding chunk; all done.
      if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
          1) {
        return;
      }
    }
  }
}
235
// Caches the isolate alongside the heap for convenient access during GC.
ScavengerCollector::ScavengerCollector(Heap* heap)
    : isolate_(heap->isolate()), heap_(heap) {}
238
239 // Remove this crashkey after chromium:1010312 is fixed.
// RAII scope that tags crash reports with a "heap" dump type for the duration
// of the scavenge, and clears the key again on destruction.
class ScopedFullHeapCrashKey {
 public:
  explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
  }
  ~ScopedFullHeapCrashKey() {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "");
  }

 private:
  Isolate* isolate_ = nullptr;
};
252
// Performs a full young-generation collection. The phases are strictly
// ordered: finish array-buffer sweeping, pause the old-space sweeper, scan
// roots, scavenge in parallel, process weak global handles, finalize the
// per-task scavengers, update external references, clear ephemerons, and
// finally release dead new-space large objects and empty remembered sets.
void ScavengerCollector::CollectGarbage() {
  ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(isolate_);

  {
    TRACE_GC(heap_->tracer(),
             GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS);
    heap_->array_buffer_sweeper()->EnsureFinished();
  }

  DCHECK(surviving_new_large_objects_.empty());
  std::vector<std::unique_ptr<Scavenger>> scavengers;
  Worklist<MemoryChunk*, 64> empty_chunks;
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  Scavenger::CopiedList copied_list(num_scavenge_tasks);
  Scavenger::PromotionList promotion_list(num_scavenge_tasks);
  EphemeronTableList ephemeron_table_list(num_scavenge_tasks);

  {
    Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();

    // Try to finish sweeping here, such that the following code doesn't need to
    // pause & resume sweeping.
    if (sweeper->sweeping_in_progress() && FLAG_concurrent_sweeping &&
        !sweeper->AreSweeperTasksRunning()) {
      // At this point we know that all concurrent sweeping tasks have run
      // out-of-work and quit: all pages are swept. The main thread still needs
      // to complete sweeping though.
      heap_->mark_compact_collector()->EnsureSweepingCompleted();
    }

    // Pause the concurrent sweeper.
    Sweeper::PauseOrCompleteScope pause_scope(sweeper);
    // Filter out pages from the sweeper that need to be processed for old to
    // new slots by the Scavenger. After processing, the Scavenger adds back
    // pages that are still unswept. This way the Scavenger has exclusive
    // access to the slots of a page and can completely avoid any locks on
    // the page itself.
    Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
    filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
      return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
    });

    // Create one Scavenger per task; they share the global work lists.
    const bool is_logging = isolate_->LogObjectRelocation();
    for (int i = 0; i < num_scavenge_tasks; ++i) {
      scavengers.emplace_back(
          new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
                        &promotion_list, &ephemeron_table_list, i));
    }

    // Collect all chunks with old-to-new slots as parallel work items.
    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [&memory_chunks](MemoryChunk* chunk) {
          memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
        });

    RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          heap_->tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      // Scavenger treats all weak roots except for global handles as strong.
      // That is why we don't set skip_weak = true here and instead visit
      // global handles separately.
      base::EnumSet<SkipRoot> options({SkipRoot::kExternalStringTable,
                                       SkipRoot::kGlobalHandles,
                                       SkipRoot::kOldGeneration});
      if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
        options.Add(SkipRoot::kStack);
      }
      heap_->IterateRoots(&root_scavenge_visitor, options);
      isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
          &root_scavenge_visitor);
      // Publish main-thread work so the parallel tasks can steal it.
      scavengers[kMainThreadId]->Flush();
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      V8::GetCurrentPlatform()
          ->PostJob(v8::TaskPriority::kUserBlocking,
                    std::make_unique<JobTask>(this, &scavengers,
                                              std::move(memory_chunks),
                                              &copied_list, &promotion_list))
          ->Join();
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    // Optionally scan the stack as a separate pass (for measurement).
    if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
      IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
                              kMainThreadId);
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    {
      // Scavenge weak global handles.
      TRACE_GC(heap_->tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate_->global_handles()->MarkYoungWeakDeadObjectsPending(
          &IsUnscavengedHeapObjectSlot);
      isolate_->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Process();

      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
      isolate_->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
          &root_scavenge_visitor, &IsUnscavengedHeapObjectSlot);
    }

    {
      // Finalize parallel scavenging.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

      DCHECK(surviving_new_large_objects_.empty());

      // Merge per-task state back into the heap/collector.
      for (auto& scavenger : scavengers) {
        scavenger->Finalize();
      }
      scavengers.clear();

      HandleSurvivingNewLargeObjects();
    }
  }

  {
    // Update references into new space
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
    heap_->UpdateYoungReferencesInExternalStringTable(
        &Heap::UpdateYoungReferenceInExternalStringTableEntry);

    heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
  }

  if (FLAG_concurrent_marking) {
    // Ensure that concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p :
         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
      heap_->concurrent_marking()->ClearMemoryChunkData(p);
    }
  }

  ProcessWeakReferences(&ephemeron_table_list);

  // Set age mark.
  heap_->new_space_->set_age_mark(heap_->new_space()->top());

  // Since we promote all surviving large objects immediately, all remaining
  // large objects must be dead.
  // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
  heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
    MemoryChunk* chunk;

    // Release remembered-set buckets that became empty during the scavenge.
    while (empty_chunks.Pop(kMainThreadId, &chunk)) {
      RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
    }

#ifdef DEBUG
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [](MemoryChunk* chunk) {
          DCHECK(chunk->possibly_empty_buckets()->IsEmpty());
        });
#endif
  }

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS);
    SweepArrayBufferExtensions();
  }

  // Update how much has survived scavenge.
  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}
438
void ScavengerCollector::IterateStackAndScavenge(
    RootScavengeVisitor* root_scavenge_visitor,
    std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
  // Scan the stack, scavenge the newly discovered objects, and report
  // the survival statistics before and after the stack scanning.
  // This code is not intended for production.
  TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
  // Sum copied+promoted bytes across all tasks before the stack scan.
  size_t survived_bytes_before = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_before +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  heap_->IterateStackRoots(root_scavenge_visitor);
  (*scavengers)[main_thread_id]->Process();
  // And again afterwards; the delta is what the stack scan kept alive.
  size_t survived_bytes_after = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_after +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "V8.GCScavengerStackScanning", "survived_bytes_before",
               survived_bytes_before, "survived_bytes_after",
               survived_bytes_after);
  if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    isolate_->PrintWithTimestamp(
        "Scavenge stack scanning: survived_before=%4zuKB, "
        "survived_after=%4zuKB delta=%.1f%%\n",
        survived_bytes_before / KB, survived_bytes_after / KB,
        (survived_bytes_after - survived_bytes_before) * 100.0 /
            survived_bytes_after);
  }
}
472
// Kicks off sweeping of young array-buffer extensions after the scavenge.
void ScavengerCollector::SweepArrayBufferExtensions() {
  heap_->array_buffer_sweeper()->RequestSweepYoung();
}
476
// Promotes all surviving new-space large objects to the old large-object
// space and restores their maps, then clears the tracking map.
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
    HeapObject object = update_info.first;
    Map map = update_info.second;
    // Order is important here. We have to re-install the map to have access
    // to meta-data like size during page promotion.
    object.set_map_word(MapWord::FromMap(map));
    LargePage* page = LargePage::FromHeapObject(object);
    heap_->lo_space()->PromoteNewLargeObject(page);
  }
  surviving_new_large_objects_.clear();
}
490
MergeSurvivingNewLargeObjects(const SurvivingNewLargeObjectsMap & objects)491 void ScavengerCollector::MergeSurvivingNewLargeObjects(
492 const SurvivingNewLargeObjectsMap& objects) {
493 for (SurvivingNewLargeObjectMapEntry object : objects) {
494 bool success = surviving_new_large_objects_.insert(object).second;
495 USE(success);
496 DCHECK(success);
497 }
498 }
499
// Computes how many parallel scavenge tasks to use: roughly one per MB of
// new-space capacity, capped by kMaxScavengerTasks and the worker-thread
// count, and reduced to one when the heap is close to its limit.
int ScavengerCollector::NumberOfScavengeTasks() {
  if (!FLAG_parallel_scavenge) return 1;
  const int num_scavenge_tasks =
      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
  // Worker-thread count does not change at runtime, so compute it once.
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  int tasks =
      Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
  // Each task may need a page's worth of old-generation headroom for
  // promotion; fall back to a single task if that cannot be guaranteed.
  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}
514
// Per-task scavenger state. Work lists are views onto the shared global
// lists, segmented by |task_id|. Marking/compacting flags are snapshotted
// here so they stay stable for the duration of the scavenge.
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                     Worklist<MemoryChunk*, 64>* empty_chunks,
                     CopiedList* copied_list, PromotionList* promotion_list,
                     EphemeronTableList* ephemeron_table_list, int task_id)
    : collector_(collector),
      heap_(heap),
      empty_chunks_(empty_chunks, task_id),
      promotion_list_(promotion_list, task_id),
      copied_list_(copied_list, task_id),
      ephemeron_table_list_(ephemeron_table_list, task_id),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
      copied_size_(0),
      promoted_size_(0),
      allocator_(heap, LocalSpaceKind::kCompactionSpaceForScavenge),
      is_logging_(is_logging),
      is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()) {}
532
// Iterates the body of a just-promoted object, scavenging its young-gen
// references and recording slots as needed (see the visitor above).
void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                 int size) {
  // We are not collecting slots on new space objects during mutation thus we
  // have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects. Grey
  // object's slots would be rescanned. White object might not survive until
  // the end of collection it would be a violation of the invariant to record
  // its slots.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);

  IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
  target.IterateBodyFast(map, size, &visitor);

  // Promoted JSArrayBuffers need their extension marked as promoted too.
  if (map.IsJSArrayBufferMap()) {
    DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
    JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
  }
}
553
RememberPromotedEphemeron(EphemeronHashTable table,int entry)554 void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
555 auto indices =
556 ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
557 indices.first->second.insert(entry);
558 }
559
AddPageToSweeperIfNecessary(MemoryChunk * page)560 void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
561 AllocationSpace space = page->owner_identity();
562 if ((space == OLD_SPACE) && !page->SweepingDone()) {
563 heap()->mark_compact_collector()->sweeper()->AddPage(
564 space, reinterpret_cast<Page*>(page),
565 Sweeper::READD_TEMPORARY_REMOVED_PAGE);
566 }
567 }
568
// Processes all old-to-new slots of |page|: the regular slot set, the
// sweeping slot set, and typed slots, scavenging each referenced object and
// dropping invalidated or dead slots.
void Scavenger::ScavengePage(MemoryChunk* page) {
  CodePageMemoryModificationScope memory_modification_scope(page);

  if (page->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          // Slots in invalidated (resized/trimmed) objects are stale.
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        empty_chunks_);
  }

  if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSetSweeping::Iterate(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        SlotSet::KEEP_EMPTY_BUCKETS);
  }

  if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
    // The invalidated slots are not needed after old-to-new slots were
    // processed.
    page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
  }

  RememberedSet<OLD_TO_NEW>::IterateTyped(
      page, [=](SlotType type, Address addr) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap_, type, addr, [this](FullMaybeObjectSlot slot) {
              return CheckAndScavengeObject(heap(), slot);
            });
      });

  // Give the page back to the sweeper if it still needs sweeping.
  AddPageToSweeperIfNecessary(page);
}
610
// Drains the copied and promotion work lists until both are locally empty.
// |delegate| may be null (single-threaded use); when present, it is notified
// periodically so more workers can join while global work remains.
void Scavenger::Process(JobDelegate* delegate) {
  ScavengeVisitor scavenge_visitor(this);

  bool done;
  size_t objects = 0;
  do {
    done = true;
    ObjectAndSize object_and_size;
    // Drain the copied list while the promotion list is small enough to be
    // processed eagerly.
    while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
           copied_list_.Pop(&object_and_size)) {
      scavenge_visitor.Visit(object_and_size.first);
      done = false;
      // Every kInterruptThreshold objects, check whether to request more
      // concurrency.
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!copied_list_.IsGlobalPoolEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }

    struct PromotionListEntry entry;
    while (promotion_list_.Pop(&entry)) {
      HeapObject target = entry.heap_object;
      IterateAndScavengePromotedObject(target, entry.map, entry.size);
      done = false;
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_.IsGlobalPoolEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }
  } while (!done);
}
643
// Post-scavenge weak-reference processing: retain/forward weak lists, then
// clear dead ephemeron entries in both young and old tables.
void ScavengerCollector::ProcessWeakReferences(
    EphemeronTableList* ephemeron_table_list) {
  ScavengeWeakObjectRetainer weak_object_retainer;
  heap_->ProcessYoungWeakReferences(&weak_object_retainer);
  ClearYoungEphemerons(ephemeron_table_list);
  ClearOldEphemerons();
}
651
652 // Clear ephemeron entries from EphemeronHashTables in new-space whenever the
653 // entry has a dead new-space key.
// Clear ephemeron entries from EphemeronHashTables in new-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearYoungEphemerons(
    EphemeronTableList* ephemeron_table_list) {
  ephemeron_table_list->Iterate([this](EphemeronHashTable table) {
    for (InternalIndex i : table.IterateEntries()) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(
          table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        // Dead key: drop the whole entry.
        table.RemoveEntry(i);
      } else {
        // Live key: update the slot to the forwarded object.
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
      }
    }
  });
  ephemeron_table_list->Clear();
}
672
673 // Clear ephemeron entries from EphemeronHashTables in old-space whenever the
674 // entry has a dead new-space key.
ClearOldEphemerons()675 void ScavengerCollector::ClearOldEphemerons() {
676 for (auto it = heap_->ephemeron_remembered_set_.begin();
677 it != heap_->ephemeron_remembered_set_.end();) {
678 EphemeronHashTable table = it->first;
679 auto& indices = it->second;
680 for (auto iti = indices.begin(); iti != indices.end();) {
681 // Keys in EphemeronHashTables must be heap objects.
682 HeapObjectSlot key_slot(table.RawFieldOfElementAt(
683 EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
684 HeapObject key = key_slot.ToHeapObject();
685 if (IsUnscavengedHeapObject(heap_, key)) {
686 table.RemoveEntry(InternalIndex(*iti));
687 iti = indices.erase(iti);
688 } else {
689 HeapObject forwarded = ForwardingAddress(key);
690 key_slot.StoreHeapObject(forwarded);
691 if (!Heap::InYoungGeneration(forwarded)) {
692 iti = indices.erase(iti);
693 } else {
694 ++iti;
695 }
696 }
697 }
698
699 if (indices.size() == 0) {
700 it = heap_->ephemeron_remembered_set_.erase(it);
701 } else {
702 ++it;
703 }
704 }
705 }
706
// Publishes this task's local state back to the heap and collector:
// pretenuring feedback, survival counters, surviving large objects, the
// allocation area, remaining work-list segments, and the local ephemeron
// remembered set (merged index-by-index into the heap's set).
void Scavenger::Finalize() {
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
  heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
  heap()->IncrementPromotedObjectsSize(promoted_size_);
  collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
  allocator_.Finalize();
  empty_chunks_.FlushToGlobal();
  ephemeron_table_list_.FlushToGlobal();
  for (auto it = ephemeron_remembered_set_.begin();
       it != ephemeron_remembered_set_.end(); ++it) {
    // insert() is a no-op if the table is already present; either way the
    // iterator points at the table's index set.
    auto insert_result = heap()->ephemeron_remembered_set_.insert(
        {it->first, std::unordered_set<int>()});
    for (int entry : it->second) {
      insert_result.first->second.insert(entry);
    }
  }
}
724
// Publishes this task's local work-list segments to the global pools so other
// tasks can pick them up.
void Scavenger::Flush() {
  copied_list_.FlushToGlobal();
  promotion_list_.FlushToGlobal();
}
729
// Queues an ephemeron hash table for weak-reference processing after the
// scavenge (see ProcessWeakReferences).
void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
  ephemeron_table_list_.Push(table);
}
733
// Scavenges a single root slot. Roots must hold strong references.
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
                                           FullObjectSlot p) {
  DCHECK(!HasWeakHeapObjectTag(*p));
  ScavengePointer(p);
}
739
VisitRootPointers(Root root,const char * description,FullObjectSlot start,FullObjectSlot end)740 void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
741 FullObjectSlot start,
742 FullObjectSlot end) {
743 // Copy all HeapObject pointers in [start, end)
744 for (FullObjectSlot p = start; p < end; ++p) ScavengePointer(p);
745 }
746
ScavengePointer(FullObjectSlot p)747 void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
748 Object object = *p;
749 DCHECK(!HasWeakHeapObjectTag(object));
750 if (Heap::InYoungGeneration(object)) {
751 scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
752 }
753 }
754
// Root visitor bound to the (main-thread) Scavenger that processes the roots.
RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}
757
// Heap-object visitor bound to the Scavenger that drives it.
ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}
760
761 } // namespace internal
762 } // namespace v8
763