// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/sweeper.h"

#include <algorithm>
#include <atomic>
#include <memory>
#include <vector>

#include "include/cppgc/platform.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/task-handle.h"

namespace cppgc {
namespace internal {

namespace {

using v8::base::Optional;

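// Verifies that a page's object start bitmap is consistent with the object
// headers found by iterating the page: each header has its bit set, and no
// other bits are set between two consecutive headers.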
class ObjectStartBitmapVerifier
    : private HeapVisitor<ObjectStartBitmapVerifier> {
  friend class HeapVisitor<ObjectStartBitmapVerifier>;

 public:
  void Verify(RawHeap& heap) {
#if DEBUG
    Traverse(heap);
#endif  // DEBUG
  }
  void Verify(NormalPage& page) {
#if DEBUG
    Traverse(page);
#endif  // DEBUG
  }

 private:
  bool VisitNormalPage(NormalPage& page) {
    // Remember bitmap and reset previous pointer.
    bitmap_ = &page.object_start_bitmap();
    prev_ = nullptr;
    return false;
  }

  bool VisitHeapObjectHeader(HeapObjectHeader& header) {
    if (header.IsLargeObject()) return true;

    auto* raw_header = reinterpret_cast<ConstAddress>(&header);
    CHECK(bitmap_->CheckBit<AccessMode::kAtomic>(raw_header));
    if (prev_) {
      // No other bits in the range [prev_, raw_header) should be set.
      CHECK_EQ(prev_, bitmap_->FindHeader<AccessMode::kAtomic>(raw_header - 1));
    }
    prev_ = &header;
    return true;
  }

  PlatformAwareObjectStartBitmap* bitmap_ = nullptr;
  HeapObjectHeader* prev_ = nullptr;
};

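// Base class for handlers that return memory freed by the sweeper to a
// space's free list. Implementations may additionally discard the underlying
// system pages.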
class FreeHandlerBase {
 public:
  virtual ~FreeHandlerBase() = default;
  virtual void FreeFreeList(
      std::vector<FreeList::Block>& unfinalized_free_list) = 0;
};

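// Free handler that adds the block to the free list and additionally discards
// the committed system pages that are fully contained in the unused part of
// the block.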
class DiscardingFreeHandler : public FreeHandlerBase {
 public:
  DiscardingFreeHandler(PageAllocator& page_allocator, FreeList& free_list,
                        BasePage& page)
      : page_allocator_(page_allocator), free_list_(free_list), page_(page) {}

  void Free(FreeList::Block block) {
    const auto unused_range = free_list_.AddReturningUnusedBounds(block);
    const uintptr_t aligned_begin_unused =
        RoundUp(reinterpret_cast<uintptr_t>(unused_range.first),
                page_allocator_.CommitPageSize());
    const uintptr_t aligned_end_unused =
        RoundDown(reinterpret_cast<uintptr_t>(unused_range.second),
                  page_allocator_.CommitPageSize());
    if (aligned_begin_unused < aligned_end_unused) {
      const size_t discarded_size = aligned_end_unused - aligned_begin_unused;
      page_allocator_.DiscardSystemPages(
          reinterpret_cast<void*>(aligned_begin_unused),
          aligned_end_unused - aligned_begin_unused);
      page_.IncrementDiscardedMemory(discarded_size);
      page_.space()
          .raw_heap()
          ->heap()
          ->stats_collector()
          ->IncrementDiscardedMemory(discarded_size);
    }
  }

  void FreeFreeList(std::vector<FreeList::Block>& unfinalized_free_list) final {
    for (auto entry : unfinalized_free_list) {
      Free(std::move(entry));
    }
  }

 private:
  PageAllocator& page_allocator_;
  FreeList& free_list_;
  BasePage& page_;
};

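// Free handler that only adds the block to the free list, without discarding
// any memory.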
class RegularFreeHandler : public FreeHandlerBase {
 public:
  RegularFreeHandler(PageAllocator& page_allocator, FreeList& free_list,
                     BasePage& page)
      : free_list_(free_list) {}

  void Free(FreeList::Block block) { free_list_.Add(std::move(block)); }

  void FreeFreeList(std::vector<FreeList::Block>& unfinalized_free_list) final {
    for (auto entry : unfinalized_free_list) {
      Free(std::move(entry));
    }
  }

 private:
  FreeList& free_list_;
};

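// Mutex-protected stack used to pass pages between the mutator thread and the
// concurrent sweeper. IsEmpty() only reads a relaxed atomic flag and may
// return a stale result.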
template <typename T>
class ThreadSafeStack {
 public:
  ThreadSafeStack() = default;

  void Push(T t) {
    v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
    vector_.push_back(std::move(t));
    is_empty_.store(false, std::memory_order_relaxed);
  }

  Optional<T> Pop() {
    v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
    if (vector_.empty()) {
      is_empty_.store(true, std::memory_order_relaxed);
      return v8::base::nullopt;
    }
    T top = std::move(vector_.back());
    vector_.pop_back();
    // std::move is redundant but is required to work around a bug in gcc-7.
    return std::move(top);
  }

  template <typename It>
  void Insert(It begin, It end) {
    v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
    vector_.insert(vector_.end(), begin, end);
    is_empty_.store(false, std::memory_order_relaxed);
  }

  bool IsEmpty() const { return is_empty_.load(std::memory_order_relaxed); }

 private:
  std::vector<T> vector_;
  mutable v8::base::Mutex mutex_;
  std::atomic<bool> is_empty_{true};
};

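// Per-space sweeping state: pages that still need to be swept, and pages that
// were swept concurrently but still need finalization on the mutator thread.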
struct SpaceState {
  struct SweptPageState {
    BasePage* page = nullptr;
#if defined(CPPGC_CAGED_HEAP)
    // The list of unfinalized objects may be extremely big. To save on space,
    // if the cage is enabled, the list of unfinalized objects is stored inline
    // in HeapObjectHeader.
    HeapObjectHeader* unfinalized_objects_head = nullptr;
#else   // !defined(CPPGC_CAGED_HEAP)
    std::vector<HeapObjectHeader*> unfinalized_objects;
#endif  // !defined(CPPGC_CAGED_HEAP)
    FreeList cached_free_list;
    std::vector<FreeList::Block> unfinalized_free_list;
    bool is_empty = false;
    size_t largest_new_free_list_entry = 0;
  };

  ThreadSafeStack<BasePage*> unswept_pages;
  ThreadSafeStack<SweptPageState> swept_unfinalized_pages;
};

using SpaceStates = std::vector<SpaceState>;

void StickyUnmark(HeapObjectHeader* header) {
  // Young generation in Oilpan uses sticky mark bits.
#if !defined(CPPGC_YOUNG_GENERATION)
  header->Unmark<AccessMode::kAtomic>();
#endif
}

class InlinedFinalizationBuilderBase {
 public:
  struct ResultType {
    bool is_empty = false;
    size_t largest_new_free_list_entry = 0;
  };
};

// Builder that finalizes objects and adds freelist entries right away.
template <typename FreeHandler>
class InlinedFinalizationBuilder final : public InlinedFinalizationBuilderBase,
                                         public FreeHandler {
 public:
  InlinedFinalizationBuilder(BasePage& page, PageAllocator& page_allocator)
      : FreeHandler(page_allocator,
                    NormalPageSpace::From(page.space()).free_list(), page) {}

  void AddFinalizer(HeapObjectHeader* header, size_t size) {
    header->Finalize();
    SetMemoryInaccessible(header, size);
  }

  void AddFreeListEntry(Address start, size_t size) {
    FreeHandler::Free({start, size});
  }

  ResultType GetResult(bool is_empty, size_t largest_new_free_list_entry) {
    return {is_empty, largest_new_free_list_entry};
  }
};

// Builder that produces results for deferred processing.
template <typename FreeHandler>
class DeferredFinalizationBuilder final : public FreeHandler {
 public:
  using ResultType = SpaceState::SweptPageState;

  DeferredFinalizationBuilder(BasePage& page, PageAllocator& page_allocator)
      : FreeHandler(page_allocator, result_.cached_free_list, page) {
    result_.page = &page;
  }

  void AddFinalizer(HeapObjectHeader* header, size_t size) {
    if (header->IsFinalizable()) {
#if defined(CPPGC_CAGED_HEAP)
      if (!current_unfinalized_) {
        DCHECK_NULL(result_.unfinalized_objects_head);
        current_unfinalized_ = header;
        result_.unfinalized_objects_head = header;
      } else {
        current_unfinalized_->SetNextUnfinalized(header);
        current_unfinalized_ = header;
      }
#else   // !defined(CPPGC_CAGED_HEAP)
      result_.unfinalized_objects.push_back({header});
#endif  // !defined(CPPGC_CAGED_HEAP)
      found_finalizer_ = true;
    } else {
      SetMemoryInaccessible(header, size);
    }
  }

  void AddFreeListEntry(Address start, size_t size) {
    if (found_finalizer_) {
      result_.unfinalized_free_list.push_back({start, size});
    } else {
      FreeHandler::Free({start, size});
    }
    found_finalizer_ = false;
  }

  ResultType&& GetResult(bool is_empty, size_t largest_new_free_list_entry) {
    result_.is_empty = is_empty;
    result_.largest_new_free_list_entry = largest_new_free_list_entry;
    return std::move(result_);
  }

 private:
  ResultType result_;
  HeapObjectHeader* current_unfinalized_ = nullptr;
  bool found_finalizer_ = false;
};

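// Sweeps a single normal page: free-list entries and unmarked objects are
// turned into (coalesced) gaps that are reported to the builder, while marked
// objects are unmarked and counted as live. Returns the builder's result.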
template <typename FinalizationBuilder>
typename FinalizationBuilder::ResultType SweepNormalPage(
    NormalPage* page, PageAllocator& page_allocator) {
  constexpr auto kAtomicAccess = AccessMode::kAtomic;
  FinalizationBuilder builder(*page, page_allocator);

  PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();

  size_t largest_new_free_list_entry = 0;
  size_t live_bytes = 0;

  Address start_of_gap = page->PayloadStart();

  const auto clear_bit_if_coalesced_entry = [&bitmap,
                                             &start_of_gap](Address address) {
    if (address != start_of_gap) {
      // Clear only if not the first freed entry.
      bitmap.ClearBit<AccessMode::kAtomic>(address);
    } else {
      // Otherwise check that the bit is set.
      DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(address));
    }
  };

  for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
       begin != end;) {
    DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(begin));
    HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(begin);
    const size_t size = header->AllocatedSize();
    // Check if this is a free list entry.
    if (header->IsFree<kAtomicAccess>()) {
      SetMemoryInaccessible(header, std::min(kFreeListEntrySize, size));
      // This prevents memory from being discarded in configurations where
      // `CheckMemoryIsInaccessibleIsNoop()` is false.
      CheckMemoryIsInaccessible(header, size);
      clear_bit_if_coalesced_entry(begin);
      begin += size;
      continue;
    }
    // Check if object is not marked (not reachable).
    if (!header->IsMarked<kAtomicAccess>()) {
      builder.AddFinalizer(header, size);
      clear_bit_if_coalesced_entry(begin);
      begin += size;
      continue;
    }
    // The object is alive.
    const Address header_address = reinterpret_cast<Address>(header);
    if (start_of_gap != header_address) {
      size_t new_free_list_entry_size =
          static_cast<size_t>(header_address - start_of_gap);
      builder.AddFreeListEntry(start_of_gap, new_free_list_entry_size);
      DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
      largest_new_free_list_entry =
          std::max(largest_new_free_list_entry, new_free_list_entry_size);
    }
    StickyUnmark(header);
    begin += size;
    start_of_gap = begin;
    live_bytes += size;
  }

  if (start_of_gap != page->PayloadStart() &&
      start_of_gap != page->PayloadEnd()) {
    builder.AddFreeListEntry(
        start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
    DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
  }
  page->SetAllocatedBytesAtLastGC(live_bytes);

  const bool is_empty = (start_of_gap == page->PayloadStart());
  return builder.GetResult(is_empty, largest_new_free_list_entry);
}

// SweepFinalizer is responsible for heap/space/page finalization. Finalization
// is defined as a step following concurrent sweeping which:
// - calls finalizers;
// - returns (unmaps) empty pages;
// - merges freelists into the space's freelist.
class SweepFinalizer final {
  using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;

 public:
  SweepFinalizer(cppgc::Platform* platform,
                 FreeMemoryHandling free_memory_handling)
      : platform_(platform), free_memory_handling_(free_memory_handling) {}

  void FinalizeHeap(SpaceStates* space_states) {
    for (SpaceState& space_state : *space_states) {
      FinalizeSpace(&space_state);
    }
  }

  void FinalizeSpace(SpaceState* space_state) {
    while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
      FinalizePage(&*page_state);
    }
  }

  bool FinalizeSpaceWithDeadline(SpaceState* space_state,
                                 double deadline_in_seconds) {
    DCHECK(platform_);
    static constexpr size_t kDeadlineCheckInterval = 8;
    size_t page_count = 1;

    while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
      FinalizePage(&*page_state);

      if (page_count % kDeadlineCheckInterval == 0 &&
          deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
        return false;
      }

      page_count++;
    }

    return true;
  }

  void FinalizePage(SpaceState::SweptPageState* page_state) {
    DCHECK(page_state);
    DCHECK(page_state->page);
    BasePage* page = page_state->page;

    // Call finalizers.
    const auto finalize_header = [](HeapObjectHeader* header) {
      const size_t size = header->AllocatedSize();
      header->Finalize();
      SetMemoryInaccessible(header, size);
    };
#if defined(CPPGC_CAGED_HEAP)
    const uint64_t cage_base =
        reinterpret_cast<uint64_t>(page->heap().caged_heap().base());
    HeapObjectHeader* next_unfinalized = nullptr;

    for (auto* unfinalized_header = page_state->unfinalized_objects_head;
         unfinalized_header; unfinalized_header = next_unfinalized) {
      next_unfinalized = unfinalized_header->GetNextUnfinalized(cage_base);
      finalize_header(unfinalized_header);
    }
#else   // !defined(CPPGC_CAGED_HEAP)
    for (HeapObjectHeader* unfinalized_header :
         page_state->unfinalized_objects) {
      finalize_header(unfinalized_header);
    }
#endif  // !defined(CPPGC_CAGED_HEAP)

    // Unmap page if empty.
    if (page_state->is_empty) {
      BasePage::Destroy(page);
      return;
    }

    DCHECK(!page->is_large());

    // Merge freelists without finalizers.
    FreeList& space_freelist = NormalPageSpace::From(page->space()).free_list();
    space_freelist.Append(std::move(page_state->cached_free_list));

    // Merge freelist with finalizers.
    std::unique_ptr<FreeHandlerBase> handler =
        (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
            ? std::unique_ptr<FreeHandlerBase>(new DiscardingFreeHandler(
                  *platform_->GetPageAllocator(), space_freelist, *page))
            : std::unique_ptr<FreeHandlerBase>(new RegularFreeHandler(
                  *platform_->GetPageAllocator(), space_freelist, *page));
    handler->FreeFreeList(page_state->unfinalized_free_list);

    largest_new_free_list_entry_ = std::max(
        page_state->largest_new_free_list_entry, largest_new_free_list_entry_);

    // After the page has been fully finalized and its freelists have been
    // merged, verify that the bitmap is consistent.
    ObjectStartBitmapVerifier().Verify(static_cast<NormalPage&>(*page));

    // Add the page to the space.
    page->space().AddPage(page);
  }

  size_t largest_new_free_list_entry() const {
    return largest_new_free_list_entry_;
  }

 private:
  cppgc::Platform* platform_;
  size_t largest_new_free_list_entry_ = 0;
  const FreeMemoryHandling free_memory_handling_;
};

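// Sweeps pages on the mutator thread, either exhaustively (Sweep) or with a
// deadline (SweepWithDeadline), in which case finalization of concurrently
// swept pages is prioritized over sweeping further pages.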
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
  friend class HeapVisitor<MutatorThreadSweeper>;

  using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;

 public:
  MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform,
                       FreeMemoryHandling free_memory_handling)
      : states_(states),
        platform_(platform),
        free_memory_handling_(free_memory_handling) {}

  void Sweep() {
    for (SpaceState& state : *states_) {
      while (auto page = state.unswept_pages.Pop()) {
        SweepPage(**page);
      }
    }
  }

  void SweepPage(BasePage& page) { Traverse(page); }

  bool SweepWithDeadline(double deadline_in_seconds) {
    DCHECK(platform_);
    static constexpr double kSlackInSeconds = 0.001;
    for (SpaceState& state : *states_) {
      // FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() only check
      // the deadline every kDeadlineCheckInterval pages, so we leave a small
      // slack for safety.
      const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
                                      platform_->MonotonicallyIncreasingTime();
      if (remaining_budget <= 0.) return false;

      // First, prioritize finalization of pages that were swept concurrently.
      SweepFinalizer finalizer(platform_, free_memory_handling_);
      if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
        return false;
      }

      // Help out the concurrent sweeper.
      if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
        return false;
      }
    }
    return true;
  }

  size_t largest_new_free_list_entry() const {
    return largest_new_free_list_entry_;
  }

 private:
  bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
    static constexpr size_t kDeadlineCheckInterval = 8;
    size_t page_count = 1;
    while (auto page = state->unswept_pages.Pop()) {
      Traverse(**page);
      if (page_count % kDeadlineCheckInterval == 0 &&
          deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
        return false;
      }
      page_count++;
    }

    return true;
  }

  bool VisitNormalPage(NormalPage& page) {
    if (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible) {
      page.ResetDiscardedMemory();
    }
    const auto result =
        (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
            ? SweepNormalPage<
                  InlinedFinalizationBuilder<DiscardingFreeHandler>>(
                  &page, *platform_->GetPageAllocator())
            : SweepNormalPage<InlinedFinalizationBuilder<RegularFreeHandler>>(
                  &page, *platform_->GetPageAllocator());
    if (result.is_empty) {
      NormalPage::Destroy(&page);
    } else {
      // The page was eagerly finalized and all freelists have been merged.
      // Verify that the bitmap is consistent with headers.
      ObjectStartBitmapVerifier().Verify(page);
      page.space().AddPage(&page);
      largest_new_free_list_entry_ = std::max(
          result.largest_new_free_list_entry, largest_new_free_list_entry_);
    }
    return true;
  }

  bool VisitLargePage(LargePage& page) {
    HeapObjectHeader* header = page.ObjectHeader();
    if (header->IsMarked()) {
      StickyUnmark(header);
      page.space().AddPage(&page);
    } else {
      header->Finalize();
      LargePage::Destroy(&page);
    }
    return true;
  }

  SpaceStates* states_;
  cppgc::Platform* platform_;
  size_t largest_new_free_list_entry_ = 0;
  const FreeMemoryHandling free_memory_handling_;
};

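// Job task that sweeps pages concurrently to the mutator thread. Results that
// require finalization are pushed onto the per-space swept_unfinalized_pages
// stack and are processed later on the mutator thread.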
class ConcurrentSweepTask final : public cppgc::JobTask,
                                  private HeapVisitor<ConcurrentSweepTask> {
  friend class HeapVisitor<ConcurrentSweepTask>;

  using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;

 public:
  ConcurrentSweepTask(HeapBase& heap, SpaceStates* states, Platform* platform,
                      FreeMemoryHandling free_memory_handling)
      : heap_(heap),
        states_(states),
        platform_(platform),
        free_memory_handling_(free_memory_handling) {}

  void Run(cppgc::JobDelegate* delegate) final {
    StatsCollector::EnabledConcurrentScope stats_scope(
        heap_.stats_collector(), StatsCollector::kConcurrentSweep);

    for (SpaceState& state : *states_) {
      while (auto page = state.unswept_pages.Pop()) {
        Traverse(**page);
        if (delegate->ShouldYield()) return;
      }
    }
    is_completed_.store(true, std::memory_order_relaxed);
  }

  size_t GetMaxConcurrency(size_t /* active_worker_count */) const final {
    return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
  }

 private:
  bool VisitNormalPage(NormalPage& page) {
    if (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible) {
      page.ResetDiscardedMemory();
    }
    SpaceState::SweptPageState sweep_result =
        (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
            ? SweepNormalPage<
                  DeferredFinalizationBuilder<DiscardingFreeHandler>>(
                  &page, *platform_->GetPageAllocator())
            : SweepNormalPage<DeferredFinalizationBuilder<RegularFreeHandler>>(
                  &page, *platform_->GetPageAllocator());
    const size_t space_index = page.space().index();
    DCHECK_GT(states_->size(), space_index);
    SpaceState& space_state = (*states_)[space_index];
    space_state.swept_unfinalized_pages.Push(std::move(sweep_result));
    return true;
  }

  bool VisitLargePage(LargePage& page) {
    HeapObjectHeader* header = page.ObjectHeader();
    if (header->IsMarked()) {
      StickyUnmark(header);
      page.space().AddPage(&page);
      return true;
    }
#if defined(CPPGC_CAGED_HEAP)
    HeapObjectHeader* const unfinalized_objects =
        header->IsFinalizable() ? page.ObjectHeader() : nullptr;
#else   // !defined(CPPGC_CAGED_HEAP)
    std::vector<HeapObjectHeader*> unfinalized_objects;
    if (header->IsFinalizable()) {
      unfinalized_objects.push_back(page.ObjectHeader());
    }
#endif  // !defined(CPPGC_CAGED_HEAP)
    const size_t space_index = page.space().index();
    DCHECK_GT(states_->size(), space_index);
    SpaceState& state = (*states_)[space_index];
    // Avoid directly destroying large pages here as counter updates and
    // backend access in BasePage::Destroy() are not concurrency safe.
    state.swept_unfinalized_pages.Push(
        {&page, std::move(unfinalized_objects), {}, {}, true});
    return true;
  }

  HeapBase& heap_;
  SpaceStates* states_;
  Platform* platform_;
  std::atomic_bool is_completed_{false};
  const FreeMemoryHandling free_memory_handling_;
};

// This visitor:
// - clears free lists for all spaces;
// - moves all heap pages into the local sweeper state (SpaceStates);
// - ASAN: poisons all unmarked object payloads.
class PrepareForSweepVisitor final
    : protected HeapVisitor<PrepareForSweepVisitor> {
  friend class HeapVisitor<PrepareForSweepVisitor>;
  using CompactableSpaceHandling =
      Sweeper::SweepingConfig::CompactableSpaceHandling;

 public:
  PrepareForSweepVisitor(SpaceStates* states,
                         CompactableSpaceHandling compactable_space_handling)
      : states_(states),
        compactable_space_handling_(compactable_space_handling) {
    DCHECK_NOT_NULL(states);
  }

  void Run(RawHeap& raw_heap) {
    DCHECK(states_->empty());
    *states_ = SpaceStates(raw_heap.size());
    Traverse(raw_heap);
  }

 protected:
  bool VisitNormalPageSpace(NormalPageSpace& space) {
    if ((compactable_space_handling_ == CompactableSpaceHandling::kIgnore) &&
        space.is_compactable())
      return true;
    DCHECK(!space.linear_allocation_buffer().size());
    space.free_list().Clear();
#ifdef V8_USE_ADDRESS_SANITIZER
    UnmarkedObjectsPoisoner().Traverse(space);
#endif  // V8_USE_ADDRESS_SANITIZER
    ExtractPages(space);
    return true;
  }

  bool VisitLargePageSpace(LargePageSpace& space) {
#ifdef V8_USE_ADDRESS_SANITIZER
    UnmarkedObjectsPoisoner().Traverse(space);
#endif  // V8_USE_ADDRESS_SANITIZER
    ExtractPages(space);
    return true;
  }

 private:
  void ExtractPages(BaseSpace& space) {
    BaseSpace::Pages space_pages = space.RemoveAllPages();
    (*states_)[space.index()].unswept_pages.Insert(space_pages.begin(),
                                                   space_pages.end());
  }

  SpaceStates* states_;
  CompactableSpaceHandling compactable_space_handling_;
};

}  // namespace

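// Implementation of the sweeper. Holds the per-space sweeping state and
// coordinates atomic, incremental (idle-task based), and concurrent sweeping.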
class Sweeper::SweeperImpl final {
  using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;

 public:
  SweeperImpl(RawHeap& heap, StatsCollector* stats_collector)
      : heap_(heap), stats_collector_(stats_collector) {}

  ~SweeperImpl() { CancelSweepers(); }

  void Start(SweepingConfig config, cppgc::Platform* platform) {
    StatsCollector::EnabledScope stats_scope(stats_collector_,
                                             StatsCollector::kAtomicSweep);
    is_in_progress_ = true;
    platform_ = platform;
    config_ = config;

    // Verify bitmap for all spaces regardless of |compactable_space_handling|.
    ObjectStartBitmapVerifier().Verify(heap_);

    // If inaccessible memory is touched to check whether it is set up
    // correctly, it cannot be discarded.
    if (!CanDiscardMemory()) {
      config_.free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
    }

    if (config_.free_memory_handling ==
        FreeMemoryHandling::kDiscardWherePossible) {
      // The discarded counter will be recomputed.
      heap_.heap()->stats_collector()->ResetDiscardedMemory();
    }

    PrepareForSweepVisitor(&space_states_, config.compactable_space_handling)
        .Run(heap_);

    if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
      Finish();
    } else {
      ScheduleIncrementalSweeping();
      ScheduleConcurrentSweeping();
    }
  }

  bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
    if (!is_in_progress_) return false;

    // Bail out for recursive sweeping calls. This can happen when finalizers
    // allocate new memory.
    if (is_sweeping_on_mutator_thread_) return false;

    SpaceState& space_state = space_states_[space->index()];

    // Bail out if there are no pages to be processed for the space at this
    // moment.
    if (space_state.swept_unfinalized_pages.IsEmpty() &&
        space_state.unswept_pages.IsEmpty()) {
      return false;
    }

    StatsCollector::EnabledScope stats_scope(stats_collector_,
                                             StatsCollector::kIncrementalSweep);
    StatsCollector::EnabledScope inner_scope(
        stats_collector_, StatsCollector::kSweepOnAllocation);
    MutatorThreadSweepingScope sweeping_in_progress(*this);

    {
      // First, process unfinalized pages as finalizing a page is faster than
      // sweeping.
      SweepFinalizer finalizer(platform_, config_.free_memory_handling);
      while (auto page = space_state.swept_unfinalized_pages.Pop()) {
        finalizer.FinalizePage(&*page);
        if (size <= finalizer.largest_new_free_list_entry()) return true;
      }
    }
    {
      // Then, if no matching slot is found in the unfinalized pages, search
      // the unswept pages. This also helps out the concurrent sweeper.
      MutatorThreadSweeper sweeper(&space_states_, platform_,
                                   config_.free_memory_handling);
      while (auto page = space_state.unswept_pages.Pop()) {
        sweeper.SweepPage(**page);
        if (size <= sweeper.largest_new_free_list_entry()) return true;
      }
    }

    return false;
  }

  void FinishIfRunning() {
    if (!is_in_progress_) return;

    // Bail out for recursive sweeping calls. This can happen when finalizers
    // allocate new memory.
    if (is_sweeping_on_mutator_thread_) return;

    {
      StatsCollector::EnabledScope stats_scope(
          stats_collector_, StatsCollector::kIncrementalSweep);
      StatsCollector::EnabledScope inner_scope(stats_collector_,
                                               StatsCollector::kSweepFinalize);
      if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
          concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
        concurrent_sweeper_handle_->UpdatePriority(
            cppgc::TaskPriority::kUserBlocking);
      }
      Finish();
    }
    NotifyDone();
  }

  void FinishIfOutOfWork() {
    if (is_in_progress_ && !is_sweeping_on_mutator_thread_ &&
        concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
        !concurrent_sweeper_handle_->IsActive()) {
      // At this point we know that the concurrent sweeping task has run out
      // of work: all pages are swept. The main thread still needs to finish
      // sweeping though.
      DCHECK(std::all_of(space_states_.begin(), space_states_.end(),
                         [](const SpaceState& state) {
                           return state.unswept_pages.IsEmpty();
                         }));
      FinishIfRunning();
    }
  }

  void Finish() {
    DCHECK(is_in_progress_);

    MutatorThreadSweepingScope sweeping_in_progress(*this);

    // First, call finalizers on the mutator thread.
    SweepFinalizer finalizer(platform_, config_.free_memory_handling);
    finalizer.FinalizeHeap(&space_states_);

    // Then, help out the concurrent thread.
    MutatorThreadSweeper sweeper(&space_states_, platform_,
                                 config_.free_memory_handling);
    sweeper.Sweep();

    FinalizeSweep();
  }

  void FinalizeSweep() {
    // Synchronize with the concurrent sweeper and call remaining finalizers.
    SynchronizeAndFinalizeConcurrentSweeping();

    // Clear space taken up by sweeper metadata.
    space_states_.clear();

    platform_ = nullptr;
    is_in_progress_ = false;
    notify_done_pending_ = true;
  }

  void NotifyDone() {
    DCHECK(!is_in_progress_);
    DCHECK(notify_done_pending_);
    notify_done_pending_ = false;
    stats_collector_->NotifySweepingCompleted();
  }

  void NotifyDoneIfNeeded() {
    if (!notify_done_pending_) return;
    NotifyDone();
  }

  void WaitForConcurrentSweepingForTesting() {
    if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Join();
  }

  bool IsSweepingOnMutatorThread() const {
    return is_sweeping_on_mutator_thread_;
  }

  bool IsSweepingInProgress() const { return is_in_progress_; }

  bool PerformSweepOnMutatorThread(double deadline_in_seconds,
                                   StatsCollector::ScopeId internal_scope_id) {
    if (!is_in_progress_) return true;

    MutatorThreadSweepingScope sweeping_in_progress(*this);

    bool sweep_complete;
    {
      StatsCollector::EnabledScope stats_scope(
          stats_collector_, StatsCollector::kIncrementalSweep);

      MutatorThreadSweeper sweeper(&space_states_, platform_,
                                   config_.free_memory_handling);
      {
        StatsCollector::EnabledScope inner_stats_scope(
            stats_collector_, internal_scope_id, "deltaInSeconds",
            deadline_in_seconds - platform_->MonotonicallyIncreasingTime());

        sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
      }
      if (sweep_complete) {
        FinalizeSweep();
      }
    }
    if (sweep_complete) NotifyDone();
    return sweep_complete;
  }

 private:
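  // Scope that flags sweeping as currently running on the mutator thread.
  // Used to bail out of re-entrant sweeping calls, e.g. when a finalizer
  // triggers an allocation.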
  class MutatorThreadSweepingScope final {
   public:
    explicit MutatorThreadSweepingScope(SweeperImpl& sweeper)
        : sweeper_(sweeper) {
      DCHECK(!sweeper_.is_sweeping_on_mutator_thread_);
      sweeper_.is_sweeping_on_mutator_thread_ = true;
    }
    ~MutatorThreadSweepingScope() {
      sweeper_.is_sweeping_on_mutator_thread_ = false;
    }

    MutatorThreadSweepingScope(const MutatorThreadSweepingScope&) = delete;
    MutatorThreadSweepingScope& operator=(const MutatorThreadSweepingScope&) =
        delete;

   private:
    SweeperImpl& sweeper_;
  };

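  // Idle task that performs an incremental sweeping step on the mutator
  // thread and reposts itself as long as sweeping has not finished.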
  class IncrementalSweepTask : public cppgc::IdleTask {
   public:
    using Handle = SingleThreadedHandle;

    explicit IncrementalSweepTask(SweeperImpl* sweeper)
        : sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}

    static Handle Post(SweeperImpl* sweeper, cppgc::TaskRunner* runner) {
      auto task = std::make_unique<IncrementalSweepTask>(sweeper);
      auto handle = task->GetHandle();
      runner->PostIdleTask(std::move(task));
      return handle;
    }

   private:
    void Run(double deadline_in_seconds) override {
      if (handle_.IsCanceled()) return;

      if (!sweeper_->PerformSweepOnMutatorThread(
              deadline_in_seconds, StatsCollector::kSweepIdleStep)) {
        sweeper_->ScheduleIncrementalSweeping();
      }
    }

    Handle GetHandle() const { return handle_; }

    SweeperImpl* sweeper_;
    // TODO(chromium:1056170): Change to CancelableTask.
    Handle handle_;
  };

  void ScheduleIncrementalSweeping() {
    DCHECK(platform_);
    auto runner = platform_->GetForegroundTaskRunner();
    if (!runner || !runner->IdleTasksEnabled()) return;

    incremental_sweeper_handle_ =
        IncrementalSweepTask::Post(this, runner.get());
  }

  void ScheduleConcurrentSweeping() {
    DCHECK(platform_);

    if (config_.sweeping_type !=
        SweepingConfig::SweepingType::kIncrementalAndConcurrent)
      return;

    concurrent_sweeper_handle_ =
        platform_->PostJob(cppgc::TaskPriority::kUserVisible,
                           std::make_unique<ConcurrentSweepTask>(
                               *heap_.heap(), &space_states_, platform_,
                               config_.free_memory_handling));
  }

  void CancelSweepers() {
    if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
    if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid())
      concurrent_sweeper_handle_->Cancel();
  }

  void SynchronizeAndFinalizeConcurrentSweeping() {
    CancelSweepers();

    SweepFinalizer finalizer(platform_, config_.free_memory_handling);
    finalizer.FinalizeHeap(&space_states_);
  }

  RawHeap& heap_;
  StatsCollector* const stats_collector_;
  SpaceStates space_states_;
  cppgc::Platform* platform_;
  SweepingConfig config_;
  IncrementalSweepTask::Handle incremental_sweeper_handle_;
  std::unique_ptr<cppgc::JobHandle> concurrent_sweeper_handle_;
  // Indicates whether the sweeping phase is in progress.
  bool is_in_progress_ = false;
  bool notify_done_pending_ = false;
  // Indicates whether the sweeper (or its finalization) is currently running
  // on the main thread.
  bool is_sweeping_on_mutator_thread_ = false;
};

Sweeper::Sweeper(HeapBase& heap)
    : heap_(heap),
      impl_(std::make_unique<SweeperImpl>(heap.raw_heap(),
                                          heap.stats_collector())) {}

Sweeper::~Sweeper() = default;

void Sweeper::Start(SweepingConfig config) {
  impl_->Start(config, heap_.platform());
}
void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
void Sweeper::FinishIfOutOfWork() { impl_->FinishIfOutOfWork(); }
void Sweeper::WaitForConcurrentSweepingForTesting() {
  impl_->WaitForConcurrentSweepingForTesting();
}
void Sweeper::NotifyDoneIfNeeded() { impl_->NotifyDoneIfNeeded(); }
bool Sweeper::SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
  return impl_->SweepForAllocationIfRunning(space, size);
}
bool Sweeper::IsSweepingOnMutatorThread() const {
  return impl_->IsSweepingOnMutatorThread();
}

bool Sweeper::IsSweepingInProgress() const {
  return impl_->IsSweepingInProgress();
}

bool Sweeper::PerformSweepOnMutatorThread(double deadline_in_seconds) {
  return impl_->PerformSweepOnMutatorThread(deadline_in_seconds,
                                            StatsCollector::kSweepInTask);
}

}  // namespace internal
}  // namespace cppgc