// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/sweeper.h"

#include "src/execution/vm-state-inl.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"

namespace v8 {
namespace internal {

Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
    : heap_(heap),
      marking_state_(marking_state),
      num_tasks_(0),
      pending_sweeper_tasks_semaphore_(0),
      incremental_sweeper_pending_(false),
      sweeping_in_progress_(false),
      num_sweeping_tasks_(0),
      stop_sweeper_tasks_(false),
      iterability_task_semaphore_(0),
      iterability_in_progress_(false),
      iterability_task_started_(false),
      should_reduce_memory_(false) {}

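// Summary (inferred from the code below): constructing a PauseOrCompleteScope
// stops the concurrent sweeper tasks. If all pages are already swept,
// sweeping is completed eagerly; otherwise it stays "in progress" but can no
// longer use background tasks. The destructor restarts the tasks.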
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
    : sweeper_(sweeper) {
  sweeper_->stop_sweeper_tasks_ = true;
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->AbortAndWaitForTasks();

  // Complete sweeping if there's nothing more to do.
  if (sweeper_->IsDoneSweeping()) {
    sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
    DCHECK(!sweeper_->sweeping_in_progress());
  } else {
    // Unless sweeping is complete the flag still indicates that the sweeper
    // is enabled. It just cannot use tasks anymore.
    DCHECK(sweeper_->sweeping_in_progress());
  }
}

Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
  sweeper_->stop_sweeper_tasks_ = false;
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->StartSweeperTasks();
}

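// While this scope is active, the OLD_SPACE sweeping list is moved out of the
// sweeper so that the owner of the scope can filter its pages; the destructor
// moves the (possibly filtered) list back. The constructor takes a
// PauseOrCompleteScope, which keeps the background tasks stopped meanwhile.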
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
    Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
    : sweeper_(sweeper),
      pause_or_complete_scope_(pause_or_complete_scope),
      sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
  USE(pause_or_complete_scope_);
  if (!sweeping_in_progress_) return;

  int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
  old_space_sweeping_list_ =
      std::move(sweeper_->sweeping_list_[old_space_index]);
  sweeper_->sweeping_list_[old_space_index].clear();
}

Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
  DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
  if (!sweeping_in_progress_) return;

  sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
      std::move(old_space_sweeping_list_);
  // old_space_sweeping_list_ does not need to be cleared as we don't use it.
}

class Sweeper::SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, Sweeper* sweeper,
              base::Semaphore* pending_sweeper_tasks,
              std::atomic<intptr_t>* num_sweeping_tasks,
              AllocationSpace space_to_start)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        num_sweeping_tasks_(num_sweeping_tasks),
        space_to_start_(space_to_start),
        tracer_(isolate->heap()->tracer()) {}

  ~SweeperTask() override = default;

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    DCHECK(IsValidSweepingSpace(space_to_start_));
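    // Each task begins with a different space (space_to_start_) and then
    // visits the remaining sweeping spaces round-robin; presumably this
    // staggering keeps concurrently running tasks from all contending on
    // the same per-space sweeping list.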
    const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
    for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
      const AllocationSpace space_id = static_cast<AllocationSpace>(
          FIRST_GROWABLE_PAGED_SPACE +
          ((i + offset) % kNumberOfSweepingSpaces));
      // Do not sweep code space concurrently.
      if (space_id == CODE_SPACE) continue;
      DCHECK(IsValidSweepingSpace(space_id));
      sweeper_->SweepSpaceFromTask(space_id);
    }
    (*num_sweeping_tasks_)--;
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_sweeper_tasks_;
  std::atomic<intptr_t>* const num_sweeping_tasks_;
  AllocationSpace space_to_start_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};

class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 public:
  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}

  ~IncrementalSweeperTask() override = default;

 private:
  void RunInternal() final {
    VMState<GC> state(isolate_);
    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");

    sweeper_->incremental_sweeper_pending_ = false;

    if (sweeper_->sweeping_in_progress()) {
      if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
        sweeper_->ScheduleIncrementalSweepingTask();
      }
    }
  }

  Isolate* const isolate_;
  Sweeper* const sweeper_;
  DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};

void Sweeper::StartSweeping() {
  CHECK(!stop_sweeper_tasks_);
  sweeping_in_progress_ = true;
  iterability_in_progress_ = true;
  should_reduce_memory_ = heap_->ShouldReduceMemory();
  MajorNonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    // Sorting is done in order to make compaction more efficient: by sweeping
    // pages with the most free bytes first, we make it more likely that when
    // evacuating a page, already swept pages will have enough free bytes to
    // hold the objects to move (and therefore, we won't need to wait for more
    // pages to be swept in order to move those objects).
    // Since maps don't move, there is no need to sort the pages from MAP_SPACE
    // before sweeping them.
    if (space != MAP_SPACE) {
      int space_index = GetSweepSpaceIndex(space);
      std::sort(
          sweeping_list_[space_index].begin(),
          sweeping_list_[space_index].end(), [marking_state](Page* a, Page* b) {
            return marking_state->live_bytes(a) > marking_state->live_bytes(b);
          });
    }
  });
}

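// Spawns one background SweeperTask per sweeping space (code space is still
// skipped inside the tasks) and schedules the incremental foreground task
// that sweeps code-space pages on the main thread.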
void Sweeper::StartSweeperTasks() {
  DCHECK_EQ(0, num_tasks_);
  DCHECK_EQ(0, num_sweeping_tasks_);
  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
      !heap_->delay_sweeper_tasks_for_testing_) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      DCHECK(IsValidSweepingSpace(space));
      num_sweeping_tasks_++;
      auto task = std::make_unique<SweeperTask>(
          heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
          &num_sweeping_tasks_, space);
      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
      task_ids_[num_tasks_++] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    });
    ScheduleIncrementalSweepingTask();
  }
}

Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::MutexGuard guard(&mutex_);
  SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
  if (!list.empty()) {
    auto last_page = list.back();
    list.pop_back();
    return last_page;
  }
  return nullptr;
}

void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
  base::MutexGuard guard(&mutex_);

  ForAllSweepingSpaces([this](AllocationSpace space) {
    SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
    for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
  });
}

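// Cancels all pending sweeper tasks. A task that has already started cannot
// be aborted, so we wait on the semaphore it signals on completion; a task
// aborted before running never decrements the counter itself, so we do it
// here on its behalf.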
void Sweeper::AbortAndWaitForTasks() {
  if (!FLAG_concurrent_sweeping) return;

  for (int i = 0; i < num_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        TryAbortResult::kTaskAborted) {
      pending_sweeper_tasks_semaphore_.Wait();
    } else {
      // Aborted case.
      num_sweeping_tasks_--;
    }
  }
  num_tasks_ = 0;
  DCHECK_EQ(0, num_sweeping_tasks_);
}

void Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  EnsureIterabilityCompleted();

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces(
      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });

  AbortAndWaitForTasks();

  ForAllSweepingSpaces([this](AllocationSpace space) {
    CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
  });
  sweeping_in_progress_ = false;
}

void Sweeper::DrainSweepingWorklists() {
  if (!sweeping_in_progress_) return;

  ForAllSweepingSpaces(
      [this](AllocationSpace space) { DrainSweepingWorklistForSpace(space); });
}

void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
  if (!sweeping_in_progress_) return;
  ParallelSweepSpace(space, 0);
}

void Sweeper::SupportConcurrentSweeping() {
  ForAllSweepingSpaces([this](AllocationSpace space) {
    const int kMaxPagesToSweepPerSpace = 1;
    ParallelSweepSpace(space, 0, kMaxPagesToSweepPerSpace);
  });
}

bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }

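// Turns the dead range [free_start, free_end) into heap-iterable memory:
// optionally zaps it, installs a filler object over it, and, when rebuilding
// free lists, hands it to the owning space's free list. Returns the bytes
// added to the free list (0 in IGNORE_FREE_LIST mode). When the heap should
// reduce memory, the underlying OS pages are additionally discarded.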
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
    Address free_start, Address free_end, Page* page, Space* space,
    bool non_empty_typed_slots, FreeListRebuildingMode free_list_mode,
    FreeSpaceTreatmentMode free_space_mode) {
  CHECK_GT(free_end, free_start);
  size_t freed_bytes = 0;
  size_t size = static_cast<size_t>(free_end - free_start);
  if (free_space_mode == ZAP_FREE_SPACE) {
    ZapCode(free_start, size);
  }
  ClearFreedMemoryMode clear_memory_mode =
      (free_list_mode == REBUILD_FREE_LIST)
          ? ClearFreedMemoryMode::kDontClearFreedMemory
          : ClearFreedMemoryMode::kClearFreedMemory;
  page->heap()->CreateFillerObjectAtBackground(
      free_start, static_cast<int>(size), clear_memory_mode);
  if (free_list_mode == REBUILD_FREE_LIST) {
    freed_bytes =
        reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(free_start, size);
  }
  if (should_reduce_memory_) page->DiscardUnusedMemory(free_start, size);

  return freed_bytes;
}

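// Removes remembered-set entries that point into the freed range. Regular
// slot sets are pruned by range right here; typed slot sets are not, so the
// range is recorded in free_ranges_map and filtered out later by
// CleanupInvalidTypedSlotsOfFreeRanges().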
V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
    Address free_start, Address free_end, Page* page,
    bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
    InvalidatedSlotsCleanup* old_to_new_cleanup) {
  DCHECK_LE(free_start, free_end);
  RememberedSetSweeping::RemoveRange(page, free_start, free_end,
                                     SlotSet::KEEP_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
                                         SlotSet::KEEP_EMPTY_BUCKETS);
  if (non_empty_typed_slots) {
    free_ranges_map->insert(std::pair<uint32_t, uint32_t>(
        static_cast<uint32_t>(free_start - page->address()),
        static_cast<uint32_t>(free_end - page->address())));
  }

  old_to_new_cleanup->Free(free_start, free_end);
}

void Sweeper::CleanupInvalidTypedSlotsOfFreeRanges(
    Page* page, const FreeRangesMap& free_ranges_map) {
  if (!free_ranges_map.empty()) {
    TypedSlotSet* old_to_new = page->typed_slot_set<OLD_TO_NEW>();
    if (old_to_new != nullptr) {
      old_to_new->ClearInvalidSlots(free_ranges_map);
    }
    TypedSlotSet* old_to_old = page->typed_slot_set<OLD_TO_OLD>();
    if (old_to_old != nullptr) {
      old_to_old->ClearInvalidSlots(free_ranges_map);
    }
  }
}

void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
    Page* page, size_t live_bytes, FreeListRebuildingMode free_list_mode) {
  marking_state_->bitmap(page)->Clear();
  if (free_list_mode == IGNORE_FREE_LIST) {
    marking_state_->SetLiveBytes(page, 0);
    // We did not free memory, so have to adjust allocated bytes here.
    intptr_t freed_bytes = page->area_size() - live_bytes;
    page->DecreaseAllocatedBytes(freed_bytes);
  } else {
    // Keep the old live bytes counter of the page until RefillFreeList, where
    // the space size is refined.
    // The allocated_bytes() counter is precisely the total size of objects.
    DCHECK_EQ(live_bytes, page->allocated_bytes());
  }
}

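// Sweeps a single page: walks the marking bitmap, frees every dead gap
// between live objects (and after the last one), cleans up remembered-set
// entries for the freed ranges, and clears the mark bits. Returns the
// largest freed block in guaranteed-allocatable bytes, or 0 in
// IGNORE_FREE_LIST mode.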
int Sweeper::RawSweep(
    Page* p, FreeListRebuildingMode free_list_mode,
    FreeSpaceTreatmentMode free_space_mode,
    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
    const base::MutexGuard& page_guard) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  // Phase 1: Prepare the page for sweeping.

  // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
  // counter. The free operations below will decrease allocated_bytes_ to actual
  // live bytes and keep track of wasted_memory_.
  p->ResetAllocationStatistics();

  CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
  if (code_object_registry) code_object_registry->Clear();

  // Phase 2: Free the non-live memory and clean up the regular remembered set
  // entries.

  // Liveness and freeing statistics.
  size_t live_bytes = 0;
  size_t max_freed_bytes = 0;

  // TODO(ulan): we don't have to clear typed old-to-old slots in code space
  // because the concurrent marker doesn't mark code objects. This requires
  // the write barrier for code objects to check the color of the code object.
  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                               p->typed_slot_set<OLD_TO_OLD>() != nullptr;

  // Clean invalidated slots during the final atomic pause. After resuming
  // execution this isn't necessary because invalid old-to-new refs were
  // already removed by mark-compact's update-pointers phase.
  InvalidatedSlotsCleanup old_to_new_cleanup =
      InvalidatedSlotsCleanup::NoCleanup(p);
  if (invalidated_slots_in_free_space ==
      FreeSpaceMayContainInvalidatedSlots::kYes)
    old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);

  // The free ranges map is used for filtering typed slots.
  FreeRangesMap free_ranges_map;

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  p->object_start_bitmap()->Clear();
#endif

  // Iterate over the page using the live objects and free the memory before
  // the given live object.
  Address free_start = p->area_start();
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject const object = object_and_size.first;
    if (code_object_registry)
      code_object_registry->RegisterAlreadyExistingCodeObject(object.address());
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object.address();
    if (free_end != free_start) {
      max_freed_bytes =
          Max(max_freed_bytes,
              FreeAndProcessFreedMemory(free_start, free_end, p, space,
                                        non_empty_typed_slots, free_list_mode,
                                        free_space_mode));
      CleanupRememberedSetEntriesForFreedMemory(
          free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
          &old_to_new_cleanup);
    }
    Map map = object.synchronized_map();
    int size = object.SizeFromMap(map);
    live_bytes += size;
    free_start = free_end + size;

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
    p->object_start_bitmap()->SetBit(object.address());
#endif
  }

  // If there is free memory after the last live object also free that.
  Address free_end = p->area_end();
  if (free_end != free_start) {
    max_freed_bytes =
        Max(max_freed_bytes,
            FreeAndProcessFreedMemory(free_start, free_end, p, space,
                                      non_empty_typed_slots, free_list_mode,
                                      free_space_mode));
    CleanupRememberedSetEntriesForFreedMemory(
        free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
        &old_to_new_cleanup);
  }

  // Phase 3: Post process the page.
  CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
  ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);

  p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
  if (code_object_registry) code_object_registry->Finalize();
  if (free_list_mode == IGNORE_FREE_LIST) return 0;

  return static_cast<int>(
      p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
}

void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
  Page* page = nullptr;
  while (!stop_sweeper_tasks_ &&
         ((page = GetSweepingPageSafe(identity)) != nullptr)) {
    // Typed slot sets are only recorded on code pages. Code pages
    // are not swept concurrently to the application to ensure W^X.
    DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
           !page->typed_slot_set<OLD_TO_OLD>());
    ParallelSweepPage(page, identity);
  }
}

bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
  if (Page* page = GetSweepingPageSafe(identity)) {
    ParallelSweepPage(page, identity);
  }
  return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}

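// Sweeps pages of the given space until a single freed block satisfies
// required_freed_bytes, max_pages pages have been swept, or the sweeping list
// is exhausted (limits of 0 disable the respective cutoff). Returns the
// largest guaranteed-allocatable block freed, so callers can tell whether a
// pending allocation can now succeed.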
int Sweeper::ParallelSweepSpace(
    AllocationSpace identity, int required_freed_bytes, int max_pages,
    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed =
        ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
    ++pages_freed;
    if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // Free list of a never-allocate page will be dropped later on.
      continue;
    }
    DCHECK_GE(freed, 0);
    max_freed = Max(max_freed, freed);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;
    if (max_pages > 0 && pages_freed >= max_pages) return max_freed;
  }
  return max_freed;
}

int Sweeper::ParallelSweepPage(
    Page* page, AllocationSpace identity,
    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
  DCHECK(IsValidSweepingSpace(identity));

  // The Scavenger may add already swept pages back.
  if (page->SweepingDone()) return 0;

  int max_freed = 0;
  {
    base::MutexGuard guard(page->mutex());
    DCHECK(!page->SweepingDone());
    // If the page is a code page, the CodePageMemoryModificationScope changes
    // the page protection mode from rx -> rw while sweeping.
    CodePageMemoryModificationScope code_page_scope(page);

    DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
              page->concurrent_sweeping_state());
    page->set_concurrent_sweeping_state(
        Page::ConcurrentSweepingState::kInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
                         invalidated_slots_in_free_space, guard);
    DCHECK(page->SweepingDone());
  }

  {
    base::MutexGuard guard(&mutex_);
    swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
  }
  return max_freed;
}

void Sweeper::ScheduleIncrementalSweepingTask() {
  if (!incremental_sweeper_pending_) {
    incremental_sweeper_pending_ = true;
    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
    auto taskrunner =
        V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
    taskrunner->PostTask(
        std::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
  }
}

void Sweeper::AddPage(AllocationSpace space, Page* page,
                      Sweeper::AddPageMode mode) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
  if (mode == Sweeper::REGULAR) {
    PrepareToBeSweptPage(space, page);
  } else {
    // Page has been temporarily removed from the sweeper. Accounting already
    // happened when the page was initially added, so it is skipped here.
    DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
  }
  DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
            page->concurrent_sweeping_state());
  sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}

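// Moves the page's old-to-new remembered set aside for the sweeper, marks the
// page as pending, and pre-accounts its live bytes to the owning space so the
// space's accounting stays correct while the page waits to be swept.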
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
#ifdef DEBUG
  DCHECK_GE(page->area_size(),
            static_cast<size_t>(marking_state_->live_bytes(page)));
  DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
            page->concurrent_sweeping_state());
  page->ForAllFreeListCategories([page](FreeListCategory* category) {
    DCHECK(!category->is_linked(page->owner()->free_list()));
  });
#endif  // DEBUG
  page->MoveOldToNewRememberedSetForSweeping();
  page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
  heap_->paged_space(space)->IncreaseAllocatedBytes(
      marking_state_->live_bytes(page), page);
}

Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  int space_index = GetSweepSpaceIndex(space);
  Page* page = nullptr;
  if (!sweeping_list_[space_index].empty()) {
    page = sweeping_list_[space_index].back();
    sweeping_list_[space_index].pop_back();
  }
  return page;
}

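// Iterability support: pages registered via AddPageForIterability are not
// fully swept but must still be walkable by heap iteration. They are
// processed here, either synchronously or by a background IterabilityTask.
// "Making a page iterable" only fills dead regions with filler objects; it
// does not rebuild free lists (see MakeIterable below).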
void Sweeper::EnsureIterabilityCompleted() {
  if (!iterability_in_progress_) return;

  if (FLAG_concurrent_sweeping && iterability_task_started_) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(
            iterability_task_id_) != TryAbortResult::kTaskAborted) {
      iterability_task_semaphore_.Wait();
    }
    iterability_task_started_ = false;
  }

  for (Page* page : iterability_list_) {
    MakeIterable(page);
  }
  iterability_list_.clear();
  iterability_in_progress_ = false;
}

class Sweeper::IterabilityTask final : public CancelableTask {
 public:
  IterabilityTask(Isolate* isolate, Sweeper* sweeper,
                  base::Semaphore* pending_iterability_task)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_iterability_task_(pending_iterability_task),
        tracer_(isolate->heap()->tracer()) {}

  ~IterabilityTask() override = default;

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    for (Page* page : sweeper_->iterability_list_) {
      sweeper_->MakeIterable(page);
    }
    sweeper_->iterability_list_.clear();
    pending_iterability_task_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_iterability_task_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
};

void Sweeper::StartIterabilityTasks() {
  if (!iterability_in_progress_) return;

  DCHECK(!iterability_task_started_);
  if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
    auto task = std::make_unique<IterabilityTask>(heap_->isolate(), this,
                                                  &iterability_task_semaphore_);
    iterability_task_id_ = task->id();
    iterability_task_started_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  }
}

void Sweeper::AddPageForIterability(Page* page) {
  DCHECK(sweeping_in_progress_);
  DCHECK(iterability_in_progress_);
  DCHECK(!iterability_task_started_);
  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
  DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
            page->concurrent_sweeping_state());

  iterability_list_.push_back(page);
  page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
}

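// Sweeps the page in IGNORE_FREE_LIST mode: dead regions become filler
// objects so heap iteration can walk the page, but no memory is returned to
// the free list for reuse.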
void Sweeper::MakeIterable(Page* page) {
  base::MutexGuard guard(page->mutex());
  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
  const FreeSpaceTreatmentMode free_space_mode =
      Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
  RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
           FreeSpaceMayContainInvalidatedSlots::kNo, guard);
}

}  // namespace internal
}  // namespace v8