1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "partition_alloc/starscan/pcscan_internal.h"
6 
7 #include <algorithm>
8 #include <array>
9 #include <chrono>
10 #include <condition_variable>
11 #include <cstdint>
12 #include <mutex>
13 #include <numeric>
14 #include <set>
15 #include <thread>
16 #include <type_traits>
17 #include <unordered_map>
18 #include <vector>
19 
20 #include "build/build_config.h"
21 #include "partition_alloc/address_pool_manager.h"
22 #include "partition_alloc/allocation_guard.h"
23 #include "partition_alloc/page_allocator.h"
24 #include "partition_alloc/page_allocator_constants.h"
25 #include "partition_alloc/partition_address_space.h"
26 #include "partition_alloc/partition_alloc.h"
27 #include "partition_alloc/partition_alloc_base/bits.h"
28 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
29 #include "partition_alloc/partition_alloc_base/cpu.h"
30 #include "partition_alloc/partition_alloc_base/debug/alias.h"
31 #include "partition_alloc/partition_alloc_base/immediate_crash.h"
32 #include "partition_alloc/partition_alloc_base/memory/ref_counted.h"
33 #include "partition_alloc/partition_alloc_base/memory/scoped_refptr.h"
34 #include "partition_alloc/partition_alloc_base/no_destructor.h"
35 #include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
36 #include "partition_alloc/partition_alloc_base/time/time.h"
37 #include "partition_alloc/partition_alloc_buildflags.h"
38 #include "partition_alloc/partition_alloc_check.h"
39 #include "partition_alloc/partition_alloc_config.h"
40 #include "partition_alloc/partition_alloc_constants.h"
41 #include "partition_alloc/partition_page.h"
42 #include "partition_alloc/reservation_offset_table.h"
43 #include "partition_alloc/starscan/metadata_allocator.h"
44 #include "partition_alloc/starscan/pcscan_scheduling.h"
45 #include "partition_alloc/starscan/raceful_worklist.h"
46 #include "partition_alloc/starscan/scan_loop.h"
47 #include "partition_alloc/starscan/snapshot.h"
48 #include "partition_alloc/starscan/stack/stack.h"
49 #include "partition_alloc/starscan/stats_collector.h"
50 #include "partition_alloc/starscan/stats_reporter.h"
51 #include "partition_alloc/tagging.h"
52 #include "partition_alloc/thread_cache.h"
53 
54 #if !BUILDFLAG(HAS_64_BIT_POINTERS)
55 #include "partition_alloc/address_pool_manager_bitmap.h"
56 #endif
57 
58 #if PA_CONFIG(STARSCAN_NOINLINE_SCAN_FUNCTIONS)
59 #define PA_SCAN_INLINE PA_NOINLINE
60 #else
61 #define PA_SCAN_INLINE PA_ALWAYS_INLINE
62 #endif
63 
64 namespace partition_alloc::internal {
65 
66 [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void DoubleFreeAttempt() {
67   PA_NO_CODE_FOLDING();
68   PA_IMMEDIATE_CRASH();
69 }
70 
71 namespace {
72 
73 #if PA_CONFIG(HAS_ALLOCATION_GUARD)
74 // Currently, reentrancy is checked only on Linux. On Android, TLS is emulated
75 // by the runtime lib, which can allocate and therefore cause reentrancy.
76 struct ReentrantScannerGuard final {
77  public:
78   ReentrantScannerGuard() {
79     PA_CHECK(!guard_);
80     guard_ = true;
81   }
82   ~ReentrantScannerGuard() { guard_ = false; }
83 
84  private:
85   // Since this variable has hidden visibility (not referenced by other DSOs),
86   // assume that thread_local works on all supported architectures.
87   static thread_local size_t guard_;
88 };
89 thread_local size_t ReentrantScannerGuard::guard_ = 0;
90 #else
91 struct [[maybe_unused]] ReentrantScannerGuard final {};
92 #endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
93 
94 // Scope that disables MTE checks. Only used inside scanning to avoid the race:
95 // a slot tag is changed by the mutator, while the scanner sees an old value.
96 struct DisableMTEScope final {
97   DisableMTEScope() {
98     ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
99         ::partition_alloc::TagViolationReportingMode::kDisabled);
100   }
101   ~DisableMTEScope() {
102     ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
103         parent_tagging_mode);
104   }
105 
106  private:
107   ::partition_alloc::TagViolationReportingMode parent_tagging_mode =
108       ::partition_alloc::internal::GetMemoryTaggingModeForCurrentThread();
109 };
110 
111 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
112 // Bytemap that represents regions (cards) that contain quarantined slots.
113 // A single PCScan cycle consists of the following steps:
114 // 1) clearing (memset quarantine + marking cards that contain quarantine);
115 // 2) scanning;
116 // 3) sweeping (freeing + unmarking cards that contain freed slots).
117 // Marking cards in step 1) ensures that the card table stays in a consistent
118 // state while scanning. Unmarking in step 3) ensures that unmarking
119 // actually happens (so that we don't accumulate too many false positives).
120 //
121 // The code here relies on the fact that |address| is in the regular pool and
122 // that the card table (this object) is allocated at the very beginning of that
123 // pool.
124 class QuarantineCardTable final {
125  public:
126   // Avoid the load of the base of the regular pool.
127   PA_ALWAYS_INLINE static QuarantineCardTable& GetFrom(uintptr_t address) {
128     PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(address));
129     return *reinterpret_cast<QuarantineCardTable*>(
130         address & PartitionAddressSpace::RegularPoolBaseMask());
131   }
132 
133   PA_ALWAYS_INLINE void Quarantine(uintptr_t begin, size_t size) {
134     return SetImpl(begin, size, true);
135   }
136 
137   PA_ALWAYS_INLINE void Unquarantine(uintptr_t begin, size_t size) {
138     return SetImpl(begin, size, false);
139   }
140 
141   // Returns whether the card to which |address| points contains quarantined
142   // slots. May return false positives but should never return false
143   // negatives, as otherwise this would break security.
144   PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const {
145     const size_t byte = Byte(address);
146     PA_SCAN_DCHECK(byte < bytes_.size());
147     return bytes_[byte];
148   }
149 
150  private:
151   static constexpr size_t kCardSize = kPoolMaxSize / kSuperPageSize;
152   static constexpr size_t kBytes = kPoolMaxSize / kCardSize;
153 
154   QuarantineCardTable() = default;
155 
156   PA_ALWAYS_INLINE static size_t Byte(uintptr_t address) {
157     return (address & ~PartitionAddressSpace::RegularPoolBaseMask()) /
158            kCardSize;
159   }
160 
161   PA_ALWAYS_INLINE void SetImpl(uintptr_t begin, size_t size, bool value) {
162     const size_t byte = Byte(begin);
163     const size_t need_bytes = (size + (kCardSize - 1)) / kCardSize;
164     PA_SCAN_DCHECK(bytes_.size() >= byte + need_bytes);
165     PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(begin));
166     for (size_t i = byte; i < byte + need_bytes; ++i) {
167       bytes_[i] = value;
168     }
169   }
170 
171   std::array<bool, kBytes> bytes_;
172 };
173 static_assert(kSuperPageSize >= sizeof(QuarantineCardTable),
174               "Card table size must be less than kSuperPageSize, since this is "
175               "what is committed");
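// Illustrative arithmetic and usage (not from this file; the regular pool size
// is configuration-dependent): assuming a 16 GiB kPoolMaxSize and the usual
// 2 MiB kSuperPageSize, kCardSize == 8 KiB, so each bool in |bytes_| covers one
// 8 KiB card and the bytemap itself occupies kPoolMaxSize / kCardSize == 2 MiB,
// i.e. exactly one super page, which is what the static_assert above relies on.
// A caller that already knows |addr| lies in the regular pool would use it
// roughly as:
//
//   QuarantineCardTable::GetFrom(addr).Quarantine(addr, size);
//   bool maybe_quarantined =
//       QuarantineCardTable::GetFrom(addr).IsQuarantined(addr);
//
// where a true result may be a false positive, but false is authoritative.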
176 #endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
177 
178 template <typename T>
179 using MetadataVector = std::vector<T, MetadataAllocator<T>>;
180 template <typename T>
181 using MetadataSet = std::set<T, std::less<>, MetadataAllocator<T>>;
182 template <typename K, typename V>
183 using MetadataHashMap =
184     std::unordered_map<K,
185                        V,
186                        std::hash<K>,
187                        std::equal_to<>,
188                        MetadataAllocator<std::pair<const K, V>>>;
189 
190 struct GetSlotStartResult final {
191   PA_ALWAYS_INLINE bool is_found() const {
192     PA_SCAN_DCHECK(!slot_start || slot_size);
193     return slot_start;
194   }
195 
196   uintptr_t slot_start = 0;
197   size_t slot_size = 0;
198 };
199 
200 // Returns the start of a slot, or 0 if |maybe_inner_address| is not inside
201 // an existing slot span. The function may return a non-0 address even inside a
202 // decommitted or free slot span; it's the caller's responsibility to check if
203 // the memory is actually allocated.
204 //
205 // |maybe_inner_address| must be within a normal-bucket super page and can also
206 // point to guard pages or slot-span metadata.
207 PA_SCAN_INLINE GetSlotStartResult
208 GetSlotStartInSuperPage(uintptr_t maybe_inner_address) {
209   PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_inner_address));
210   // Don't use SlotSpanMetadata/PartitionPage::FromAddr() and family, because
211   // they expect an address within a super page payload area, which we don't
212   // know yet if |maybe_inner_address| is.
213   const uintptr_t super_page = maybe_inner_address & kSuperPageBaseMask;
214 
215   const uintptr_t partition_page_index =
216       (maybe_inner_address & kSuperPageOffsetMask) >> PartitionPageShift();
217   auto* page =
218       PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
219   // Check if page is valid. The check also works for the guard pages and the
220   // metadata page.
221   if (!page->is_valid) {
222     return {};
223   }
224 
225   page -= page->slot_span_metadata_offset;
226   PA_SCAN_DCHECK(page->is_valid);
227   PA_SCAN_DCHECK(!page->slot_span_metadata_offset);
228   auto* slot_span = &page->slot_span_metadata;
229   // Check if the slot span is actually used and valid.
230   if (!slot_span->bucket) {
231     return {};
232   }
233 #if PA_SCAN_DCHECK_IS_ON()
234   DCheckIsValidSlotSpan(slot_span);
235 #endif
236   const uintptr_t slot_span_start =
237       SlotSpanMetadata::ToSlotSpanStart(slot_span);
238   const ptrdiff_t ptr_offset = maybe_inner_address - slot_span_start;
239   PA_SCAN_DCHECK(0 <= ptr_offset &&
240                  ptr_offset < static_cast<ptrdiff_t>(
241                                   slot_span->bucket->get_pages_per_slot_span() *
242                                   PartitionPageSize()));
243   // The slot span size in bytes is not necessarily a multiple of the partition
244   // page size. Don't check whether the pointer points outside the usable area,
245   // since checking the quarantine bit will return false in that case anyway.
246   const size_t slot_size = slot_span->bucket->slot_size;
247   const size_t slot_number = slot_span->bucket->GetSlotNumber(ptr_offset);
248   const uintptr_t slot_start = slot_span_start + (slot_number * slot_size);
249   PA_SCAN_DCHECK(slot_start <= maybe_inner_address &&
250                  maybe_inner_address < slot_start + slot_size);
251   return {.slot_start = slot_start, .slot_size = slot_size};
252 }
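// Worked example (hypothetical values): for a slot span starting at S with
// slot_size == 64, an inner pointer S + 150 gives ptr_offset == 150,
// slot_number == 2 and a returned slot_start of S + 128, i.e. the inner
// pointer is mapped back to the start of the slot containing it.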
253 
254 #if PA_SCAN_DCHECK_IS_ON()
255 bool IsQuarantineEmptyOnSuperPage(uintptr_t super_page) {
256   auto* bitmap = SuperPageStateBitmap(super_page);
257   size_t visited = 0;
258   bitmap->IterateQuarantined([&visited](auto) { ++visited; });
259   return !visited;
260 }
261 #endif
262 
263 SimdSupport DetectSimdSupport() {
264 #if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
265   return SimdSupport::kNEON;
266 #else
267   const base::CPU& cpu = base::CPU::GetInstanceNoAllocation();
268   if (cpu.has_avx2()) {
269     return SimdSupport::kAVX2;
270   }
271   if (cpu.has_sse41()) {
272     return SimdSupport::kSSE41;
273   }
274   return SimdSupport::kUnvectorized;
275 #endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
276 }
277 
278 void CommitCardTable() {
279 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
280   RecommitSystemPages(PartitionAddressSpace::RegularPoolBase(),
281                       sizeof(QuarantineCardTable),
282                       PageAccessibilityConfiguration(
283                           PageAccessibilityConfiguration::kReadWrite),
284                       PageAccessibilityDisposition::kRequireUpdate);
285 #endif
286 }
287 
288 template <class Function>
289 void IterateNonEmptySlotSpans(uintptr_t super_page,
290                               size_t nonempty_slot_spans,
291                               Function function) {
292   PA_SCAN_DCHECK(!(super_page % kSuperPageAlignment));
293   PA_SCAN_DCHECK(nonempty_slot_spans);
294 
295   size_t slot_spans_to_visit = nonempty_slot_spans;
296 #if PA_SCAN_DCHECK_IS_ON()
297   size_t visited = 0;
298 #endif
299 
300   IterateSlotSpans(super_page, true /*with_quarantine*/,
301                    [&function, &slot_spans_to_visit
302 #if PA_SCAN_DCHECK_IS_ON()
303                     ,
304                     &visited
305 #endif
306   ](SlotSpanMetadata* slot_span) {
307                      if (slot_span->is_empty() || slot_span->is_decommitted()) {
308                        // Skip empty/decommitted slot spans.
309                        return false;
310                      }
311                      function(slot_span);
312                      --slot_spans_to_visit;
313 #if PA_SCAN_DCHECK_IS_ON()
314                      // In debug builds, scan all the slot spans to check that
315                      // the number of visited slot spans is equal to the number
316                      // of nonempty_slot_spans.
316                      // nonempty_slot_spans.
317                      ++visited;
318                      return false;
319 #else
320         return slot_spans_to_visit == 0;
321 #endif
322                    });
323 #if PA_SCAN_DCHECK_IS_ON()
324   // Check that the number of visited slot spans matches the number of
325   // non-empty ones.
325   PA_DCHECK(nonempty_slot_spans == visited);
326 #endif
327 }
328 
329 // SuperPageSnapshot is used to record all slot spans that contain live slots.
330 // The class avoids dynamic allocations and is designed to be instantiated on
331 // the stack. To avoid stack overflow, internal data structures are kept packed.
332 class SuperPageSnapshot final {
333   // The following constants are used to define a conservative estimate for
334   // maximum number of slot spans in a super page.
335   //
336   // For systems with runtime-defined page size, assume partition page size is
337   // at least 16 KiB.
338   static constexpr size_t kMinPartitionPageSize =
339       __builtin_constant_p(PartitionPageSize()) ? PartitionPageSize() : 1 << 14;
340   static constexpr size_t kStateBitmapMinReservedSize =
341       __builtin_constant_p(ReservedStateBitmapSize())
342           ? ReservedStateBitmapSize()
343           : partition_alloc::internal::base::bits::AlignUp(
344                 sizeof(AllocationStateMap),
345                 kMinPartitionPageSize);
346   // Take into account the guard partition page at the end of the super-page.
347   static constexpr size_t kGuardPagesSize = 2 * kMinPartitionPageSize;
348 
349   static constexpr size_t kPayloadMaxSize =
350       kSuperPageSize - kStateBitmapMinReservedSize - kGuardPagesSize;
351   static_assert(kPayloadMaxSize % kMinPartitionPageSize == 0,
352                 "kPayloadMaxSize must be multiple of kMinPartitionPageSize");
353 
354   static constexpr size_t kMaxSlotSpansInSuperPage =
355       kPayloadMaxSize / kMinPartitionPageSize;
356 
357  public:
358   struct ScanArea {
359     // Use packed integer types to save stack space. In theory, kAlignment could
360     // be used instead of words, but it doesn't seem to bring savings.
361     uint32_t offset_within_page_in_words;
362     uint32_t size_in_words;
363     uint32_t slot_size_in_words;
364   };
365 
366   class ScanAreas : private std::array<ScanArea, kMaxSlotSpansInSuperPage> {
367     using Base = std::array<ScanArea, kMaxSlotSpansInSuperPage>;
368 
369    public:
370     using iterator = Base::iterator;
371     using const_iterator = Base::const_iterator;
372     using Base::operator[];
373 
374     iterator begin() { return Base::begin(); }
375     const_iterator begin() const { return Base::begin(); }
376 
377     iterator end() { return std::next(begin(), size_); }
378     const_iterator end() const { return std::next(begin(), size_); }
379 
380     void set_size(size_t new_size) { size_ = new_size; }
381 
382    private:
383     size_t size_;
384   };
385 
386   static_assert(std::is_trivially_default_constructible_v<ScanAreas>,
387                 "ScanAreas must be trivially default constructible to ensure "
388                 "that no memsets are generated by the compiler as a "
389                 "result of value-initialization (or zero-initialization)");
390 
391   void* operator new(size_t) = delete;
392   void operator delete(void*) = delete;
393 
394   // Creates snapshot for a single super page. In theory, we could simply
395   // iterate over slot spans without taking a snapshot. However, we do this to
396   // minimize the mutex locking time. The mutex must be acquired to make sure
397   // that no mutator is concurrently changing any of the slot spans.
398   explicit SuperPageSnapshot(uintptr_t super_page_base);
399 
400   const ScanAreas& scan_areas() const { return scan_areas_; }
401 
402  private:
403   ScanAreas scan_areas_;
404 };
405 
406 static_assert(
407     sizeof(SuperPageSnapshot) <= 2048,
408     "SuperPageSnapshot must stay relatively small to be allocated on stack");
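// Illustrative sizing (assuming the 16 KiB fallback partition page size and a
// 2 MiB super page): kPayloadMaxSize is at most 2048 KiB - 16 KiB - 32 KiB =
// 2000 KiB, so kMaxSlotSpansInSuperPage is at most 125. With ScanArea at
// 12 bytes, ScanAreas is roughly 1.5 KiB plus the size_ field, comfortably
// under the 2048-byte bound asserted above.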
409 
410 SuperPageSnapshot::SuperPageSnapshot(uintptr_t super_page) {
411   using SlotSpan = SlotSpanMetadata;
412 
413   auto* extent_entry = PartitionSuperPageToExtent(super_page);
414 
415   ::partition_alloc::internal::ScopedGuard lock(
416       ::partition_alloc::internal::PartitionRootLock(extent_entry->root));
417 
418   const size_t nonempty_slot_spans =
419       extent_entry->number_of_nonempty_slot_spans;
420   if (!nonempty_slot_spans) {
421 #if PA_SCAN_DCHECK_IS_ON()
422     // Check that quarantine bitmap is empty for super-pages that contain
423     // only empty/decommitted slot-spans.
424     PA_CHECK(IsQuarantineEmptyOnSuperPage(super_page));
425 #endif
426     scan_areas_.set_size(0);
427     return;
428   }
429 
430   size_t current = 0;
431 
432   IterateNonEmptySlotSpans(
433       super_page, nonempty_slot_spans, [this, &current](SlotSpan* slot_span) {
434         const uintptr_t payload_begin = SlotSpan::ToSlotSpanStart(slot_span);
435         // For single-slot slot-spans, scan only the utilized part of the slot.
436         const size_t provisioned_size =
437             PA_UNLIKELY(slot_span->CanStoreRawSize())
438                 ? slot_span->GetRawSize()
439                 : slot_span->GetProvisionedSize();
440         // Free & decommitted slot spans are skipped.
441         PA_SCAN_DCHECK(provisioned_size > 0);
442         const uintptr_t payload_end = payload_begin + provisioned_size;
443         auto& area = scan_areas_[current];
444 
445         const size_t offset_in_words =
446             (payload_begin & kSuperPageOffsetMask) / sizeof(uintptr_t);
447         const size_t size_in_words =
448             (payload_end - payload_begin) / sizeof(uintptr_t);
449         const size_t slot_size_in_words =
450             slot_span->bucket->slot_size / sizeof(uintptr_t);
451 
452 #if PA_SCAN_DCHECK_IS_ON()
453         PA_DCHECK(offset_in_words <=
454                   std::numeric_limits<
455                       decltype(area.offset_within_page_in_words)>::max());
456         PA_DCHECK(size_in_words <=
457                   std::numeric_limits<decltype(area.size_in_words)>::max());
458         PA_DCHECK(
459             slot_size_in_words <=
460             std::numeric_limits<decltype(area.slot_size_in_words)>::max());
461 #endif
462 
463         area.offset_within_page_in_words = offset_in_words;
464         area.size_in_words = size_in_words;
465         area.slot_size_in_words = slot_size_in_words;
466 
467         ++current;
468       });
469 
470   PA_SCAN_DCHECK(kMaxSlotSpansInSuperPage >= current);
471   scan_areas_.set_size(current);
472 }
473 
474 }  // namespace
475 
476 class PCScanScanLoop;
477 
478 // This class is responsible for performing the entire PCScan task.
479 // TODO(bikineev): Move PCScan algorithm out of PCScanTask.
480 class PCScanTask final : public base::RefCountedThreadSafe<PCScanTask>,
481                          public AllocatedOnPCScanMetadataPartition {
482  public:
483   // Creates and initializes a PCScan state.
484   PCScanTask(PCScan& pcscan, size_t quarantine_last_size);
485 
486   PCScanTask(PCScanTask&&) noexcept = delete;
487   PCScanTask& operator=(PCScanTask&&) noexcept = delete;
488 
489   // Execute PCScan from mutator inside safepoint.
490   void RunFromMutator();
491 
492   // Execute PCScan from the scanner thread. Must be called only once from the
493   // scanner thread.
494   void RunFromScanner();
495 
496   PCScanScheduler& scheduler() const { return pcscan_.scheduler(); }
497 
498  private:
499   class StackVisitor;
500   friend class PCScanScanLoop;
501 
502   using Root = PCScan::Root;
503   using SlotSpan = SlotSpanMetadata;
504 
505   // This is used:
506   // - to synchronize all scanning threads (mutators and the scanner);
507   // - for the scanner, to transition through the state machine
508   //   (kScheduled -> kScanning (ctor) -> kSweepingAndFinishing (dtor)).
509   template <Context context>
510   class SyncScope final {
511    public:
512     explicit SyncScope(PCScanTask& task) : task_(task) {
513       task_.number_of_scanning_threads_.fetch_add(1, std::memory_order_relaxed);
514       if (context == Context::kScanner) {
515         task_.pcscan_.state_.store(PCScan::State::kScanning,
516                                    std::memory_order_relaxed);
517         task_.pcscan_.SetJoinableIfSafepointEnabled(true);
518       }
519     }
520     ~SyncScope() {
521       // First, notify the scanning thread that this thread is done.
522       NotifyThreads();
523       if (context == Context::kScanner) {
524         // The scanner thread must wait here until all mutators leave the safepoint.
525         // Otherwise, sweeping may free a page that can later be accessed by a
526         // descheduled mutator.
527         WaitForOtherThreads();
528         task_.pcscan_.state_.store(PCScan::State::kSweepingAndFinishing,
529                                    std::memory_order_relaxed);
530       }
531     }
532 
533    private:
534     void NotifyThreads() {
535       {
536         // The lock is required as otherwise there is a race between
537         // fetch_sub/notify in the mutator and checking
538         // number_of_scanning_threads_/waiting in the scanner.
539         std::lock_guard<std::mutex> lock(task_.mutex_);
540         task_.number_of_scanning_threads_.fetch_sub(1,
541                                                     std::memory_order_relaxed);
542         {
543           // Notify that scan is done and there is no need to enter
544           // the safepoint. This also helps a mutator avoid repeatedly
545           // entering it. Since the scanner thread waits for all threads to finish,
546           // there is no ABA problem here.
547           task_.pcscan_.SetJoinableIfSafepointEnabled(false);
548         }
549       }
550       task_.condvar_.notify_all();
551     }
552 
553     void WaitForOtherThreads() {
554       std::unique_lock<std::mutex> lock(task_.mutex_);
555       task_.condvar_.wait(lock, [this] {
556         return !task_.number_of_scanning_threads_.load(
557             std::memory_order_relaxed);
558       });
559     }
560 
561     PCScanTask& task_;
562   };
563 
564   friend class base::RefCountedThreadSafe<PCScanTask>;
565   ~PCScanTask() = default;
566 
567   PA_SCAN_INLINE AllocationStateMap* TryFindScannerBitmapForPointer(
568       uintptr_t maybe_ptr) const;
569 
570   // Lookup and marking functions. Return the size of the slot if marked, or
571   // zero otherwise.
571   // otherwise.
572   PA_SCAN_INLINE size_t TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const;
573 
574   // Scans stack, only called from safepoints.
575   void ScanStack();
576 
577   // Scan individual areas.
578   void ScanNormalArea(PCScanInternal& pcscan,
579                       PCScanScanLoop& scan_loop,
580                       uintptr_t begin,
581                       uintptr_t end);
582   void ScanLargeArea(PCScanInternal& pcscan,
583                      PCScanScanLoop& scan_loop,
584                      uintptr_t begin,
585                      uintptr_t end,
586                      size_t slot_size);
587 
588   // Scans all registered partitions and marks reachable quarantined slots.
589   void ScanPartitions();
590 
591   // Clear quarantined slots and prepare the card table for fast lookup.
592   void ClearQuarantinedSlotsAndPrepareCardTable();
593 
594   // Unprotect all slot spans from all partitions.
595   void UnprotectPartitions();
596 
597   // Sweeps (frees) unreachable quarantined entries.
598   void SweepQuarantine();
599 
600   // Finishes the scanner (updates limits, UMA, etc).
601   void FinishScanner();
602 
603   // Cache the pcscan epoch to avoid the compiler loading the atomic
604   // QuarantineData::epoch_ on each access.
605   const size_t pcscan_epoch_;
606   std::unique_ptr<StarScanSnapshot> snapshot_;
607   StatsCollector stats_;
608   // Mutex and condvar that are used to synchronize scanning threads.
609   std::mutex mutex_;
610   std::condition_variable condvar_;
611   std::atomic<size_t> number_of_scanning_threads_{0u};
612   // We can unprotect only once to reduce context-switches.
613   std::once_flag unprotect_once_flag_;
614   bool immediatelly_free_slots_{false};
615   PCScan& pcscan_;
616 };
617 
618 PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
619     uintptr_t maybe_ptr) const {
620   PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
621   // First, check if |maybe_ptr| points to a valid super page or a quarantined
622   // card.
623 #if BUILDFLAG(HAS_64_BIT_POINTERS)
624 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
625   // Check if |maybe_ptr| points to a quarantined card.
626   if (PA_LIKELY(
627           !QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
628     return nullptr;
629   }
630 #else   // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
631   // Without the card table, use the reservation offset table to check if
632   // |maybe_ptr| points to a valid super-page. It's not as precise (meaning that
633   // we may hit the slow path more frequently), but it reduces the memory
634   // overhead. Since we are certain here that |maybe_ptr| refers to the
635   // regular pool, it's okay to use the non-checking version of
636   // ReservationOffsetPointer().
637   const uintptr_t offset =
638       maybe_ptr & ~PartitionAddressSpace::RegularPoolBaseMask();
639   if (PA_LIKELY(*ReservationOffsetPointer(kRegularPoolHandle, offset) !=
640                 kOffsetTagNormalBuckets)) {
641     return nullptr;
642   }
643 #endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
644 #else   // BUILDFLAG(HAS_64_BIT_POINTERS)
645   if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
646     return nullptr;
647   }
648 #endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
649 
650   // We are certain here that |maybe_ptr| points to an allocated super-page.
651   return StateBitmapFromAddr(maybe_ptr);
652 }
653 
654 // Looks up and marks a potential dangling pointer. Returns the size of the slot
655 // (which is then accounted as quarantined), or zero if no slot is found.
656 // For normal bucket super pages, PCScan uses two quarantine bitmaps, the
657 // mutator and the scanner one. The former is used by mutators when slots are
658 // freed, while the latter is used concurrently by the PCScan thread. The
659 // bitmaps are swapped as soon as PCScan is triggered. Once a dangling pointer
660 // (which points to a slot in the scanner bitmap) is found,
661 // TryMarkSlotInNormalBuckets() marks it again in the mutator bitmap and clears
662 // it from the scanner bitmap. This way, when scanning is done, all uncleared
663 // entries in the scanner bitmap correspond to unreachable slots.
664 PA_SCAN_INLINE size_t
665 PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const {
666   // Check if |maybe_ptr| points somewhere to the heap.
667   // The caller has to make sure that |maybe_ptr| isn't MTE-tagged.
668   auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr);
669   if (!state_map) {
670     return 0;
671   }
672 
673   // Beyond this point, we know that |maybe_ptr| is a pointer within a
674   // normal-bucket super page.
675   PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_ptr));
676 
677 #if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
678   // A pointer from a normal bucket is always in the first superpage.
679   auto* root = Root::FromAddrInFirstSuperpage(maybe_ptr);
680   // Without the card table, we must make sure that |maybe_ptr| doesn't point to
681   // the metadata partition.
682   // TODO(bikineev): To speed things up, consider removing the check and
683   // committing quarantine bitmaps for metadata partition.
684   // TODO(bikineev): Marking an entry in the reservation-table is not a
685   // publishing operation, meaning that the |root| pointer may not be assigned
686   // yet. This can happen as arbitrary pointers may point into a super-page
687   // during its set up. Make sure to check |root| is not null before
688   // dereferencing it.
689   if (PA_UNLIKELY(!root || !root->IsQuarantineEnabled())) {
690     return 0;
691   }
692 #endif  // !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
693 
694   // Check if pointer was in the quarantine bitmap.
695   const GetSlotStartResult slot_start_result =
696       GetSlotStartInSuperPage(maybe_ptr);
697   if (!slot_start_result.is_found()) {
698     return 0;
699   }
700 
701   const uintptr_t slot_start = slot_start_result.slot_start;
702   if (PA_LIKELY(!state_map->IsQuarantined(slot_start))) {
703     return 0;
704   }
705 
706   PA_SCAN_DCHECK((maybe_ptr & kSuperPageBaseMask) ==
707                  (slot_start & kSuperPageBaseMask));
708 
709   if (PA_UNLIKELY(immediatelly_free_slots_)) {
710     return 0;
711   }
712 
713   // Now we are certain that |maybe_ptr| is a dangling pointer. Mark it again in
714   // the mutator bitmap and clear it from the scanner bitmap. Note that since
715   // PCScan has exclusive access to the scanner bitmap, we can avoid an atomic
716   // RMW operation for it.
717   if (PA_LIKELY(
718           state_map->MarkQuarantinedAsReachable(slot_start, pcscan_epoch_))) {
719     return slot_start_result.slot_size;
720   }
721 
722   return 0;
723 }
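// Illustrative flow (a sketch of the interplay described above): a mutator
// frees slot S, recording S as quarantined in the mutator bitmap; when PCScan
// starts, the bitmaps swap, so S is now in the scanner bitmap; if the scan
// finds a pointer into S, MarkQuarantinedAsReachable() above marks S in the
// mutator bitmap again and clears it from the scanner bitmap, so S survives;
// otherwise S stays unmarked and is freed later by SweepQuarantine().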
724 
725 void PCScanTask::ClearQuarantinedSlotsAndPrepareCardTable() {
726   const PCScan::ClearType clear_type = pcscan_.clear_type_;
727 
728 #if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
729   if (clear_type == PCScan::ClearType::kEager) {
730     return;
731   }
732 #endif
733 
734   StarScanSnapshot::ClearingView view(*snapshot_);
735   view.VisitConcurrently([clear_type](uintptr_t super_page) {
736     auto* bitmap = StateBitmapFromAddr(super_page);
737     auto* root = Root::FromFirstSuperPage(super_page);
738     bitmap->IterateQuarantined([root, clear_type](uintptr_t slot_start) {
739       auto* slot_span = SlotSpan::FromSlotStart(slot_start);
740       // Use zero as a zapping value to speed up the fast bailout check in
741       // ScanPartitions.
742       const size_t size = root->GetSlotUsableSize(slot_span);
743       if (clear_type == PCScan::ClearType::kLazy) {
744         void* object = root->SlotStartToObject(slot_start);
745         memset(object, 0, size);
746       }
747 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
748       // Set card(s) for this quarantined slot.
749       QuarantineCardTable::GetFrom(slot_start).Quarantine(slot_start, size);
750 #endif
751     });
752   });
753 }
754 
755 void PCScanTask::UnprotectPartitions() {
756   auto& pcscan = PCScanInternal::Instance();
757   if (!pcscan.WriteProtectionEnabled()) {
758     return;
759   }
760 
761   StarScanSnapshot::UnprotectingView unprotect_view(*snapshot_);
762   unprotect_view.VisitConcurrently([&pcscan](uintptr_t super_page) {
763     SuperPageSnapshot super_page_snapshot(super_page);
764 
765     for (const auto& scan_area : super_page_snapshot.scan_areas()) {
766       const uintptr_t begin =
767           super_page |
768           (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
769       const uintptr_t end =
770           begin + (scan_area.size_in_words * sizeof(uintptr_t));
771 
772       pcscan.UnprotectPages(begin, end - begin);
773     }
774   });
775 }
776 
777 class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
778   friend class ScanLoop<PCScanScanLoop>;
779 
780  public:
781   explicit PCScanScanLoop(const PCScanTask& task)
782       : ScanLoop(PCScanInternal::Instance().simd_support()), task_(task) {}
783 
784   size_t quarantine_size() const { return quarantine_size_; }
785 
786  private:
787 #if BUILDFLAG(HAS_64_BIT_POINTERS)
788   PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
789     return PartitionAddressSpace::RegularPoolBase();
790   }
791   PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
792     return PartitionAddressSpace::RegularPoolBaseMask();
793   }
794 #endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
795 
796   PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
797     // |maybe_ptr| may have an MTE tag, so remove it first.
798     quarantine_size_ +=
799         task_.TryMarkSlotInNormalBuckets(UntagAddr(maybe_ptr_maybe_tagged));
800   }
801 
802   const PCScanTask& task_;
803   DisableMTEScope disable_mte_;
804   size_t quarantine_size_ = 0;
805 };
806 
807 class PCScanTask::StackVisitor final : public internal::StackVisitor {
808  public:
809   explicit StackVisitor(const PCScanTask& task) : task_(task) {}
810 
811   void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) override {
812     static constexpr size_t kMinimalAlignment = 32;
813     uintptr_t begin =
814         reinterpret_cast<uintptr_t>(stack_ptr) & ~(kMinimalAlignment - 1);
815     uintptr_t end =
816         (reinterpret_cast<uintptr_t>(stack_top) + kMinimalAlignment - 1) &
817         ~(kMinimalAlignment - 1);
818     PA_CHECK(begin < end);
819     PCScanScanLoop loop(task_);
820     loop.Run(begin, end);
821     quarantine_size_ += loop.quarantine_size();
822   }
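  // Illustrative rounding (hypothetical addresses): with stack_ptr == 0x1004
  // and stack_top == 0x2044, |begin| is rounded down to 0x1000 and |end| up to
  // 0x2060, so the conservative scan covers whole 32-byte-aligned chunks.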
823 
824   // Returns size of quarantined slots that are reachable from the current
825   // stack.
826   size_t quarantine_size() const { return quarantine_size_; }
827 
828  private:
829   const PCScanTask& task_;
830   size_t quarantine_size_ = 0;
831 };
832 
833 PCScanTask::PCScanTask(PCScan& pcscan, size_t quarantine_last_size)
834     : pcscan_epoch_(pcscan.epoch() - 1),
835       snapshot_(StarScanSnapshot::Create(PCScanInternal::Instance())),
836       stats_(PCScanInternal::Instance().process_name(), quarantine_last_size),
837       immediatelly_free_slots_(
838           PCScanInternal::Instance().IsImmediateFreeingEnabled()),
839       pcscan_(pcscan) {}
840 
841 void PCScanTask::ScanStack() {
842   const auto& pcscan = PCScanInternal::Instance();
843   if (!pcscan.IsStackScanningEnabled()) {
844     return;
845   }
846   // Check if the stack top was registered. It may not be registered if the
847   // current allocation happens from pthread trampolines.
848   void* stack_top = pcscan.GetCurrentThreadStackTop();
849   if (PA_UNLIKELY(!stack_top)) {
850     return;
851   }
852 
853   Stack stack_scanner(stack_top);
854   StackVisitor visitor(*this);
855   stack_scanner.IteratePointers(&visitor);
856   stats_.IncreaseSurvivedQuarantineSize(visitor.quarantine_size());
857 }
858 
859 void PCScanTask::ScanNormalArea(PCScanInternal& pcscan,
860                                 PCScanScanLoop& scan_loop,
861                                 uintptr_t begin,
862                                 uintptr_t end) {
863   // Protect slot span before scanning it.
864   pcscan.ProtectPages(begin, end - begin);
865   scan_loop.Run(begin, end);
866 }
867 
868 void PCScanTask::ScanLargeArea(PCScanInternal& pcscan,
869                                PCScanScanLoop& scan_loop,
870                                uintptr_t begin,
871                                uintptr_t end,
872                                size_t slot_size) {
873   // For scanning large areas, it's worthwhile checking whether the range that
874   // is scanned contains allocated slots. It also helps to skip discarded
875   // freed slots.
876   // Protect slot span before scanning it.
877   pcscan.ProtectPages(begin, end - begin);
878 
879   auto* bitmap = StateBitmapFromAddr(begin);
880 
881   for (uintptr_t current_slot = begin; current_slot < end;
882        current_slot += slot_size) {
883     // It is okay to skip slots, as the objects they hold have been zapped at
884     // this point, which means that their pointers no longer retain other slots.
885     if (!bitmap->IsAllocated(current_slot)) {
886       continue;
887     }
888     uintptr_t current_slot_end = current_slot + slot_size;
889     // |slot_size| may be larger than |raw_size| for single-slot slot spans.
890     scan_loop.Run(current_slot, std::min(current_slot_end, end));
891   }
892 }
893 
894 void PCScanTask::ScanPartitions() {
895   // Threshold of bucket slot size above which it is worthwhile to check whether
896   // a slot is allocated before scanning it. PartitionPurgeSlotSpan()
897   // purges only slots >= page-size, which helps us avoid faulting in
898   // discarded pages. We actually lower it further to 1024 to take advantage of
899   // skipping unallocated slots, but don't want to go any lower, as this comes
900   // at the cost of expensive bitmap checking.
901   static constexpr size_t kLargeScanAreaThresholdInWords =
902       1024 / sizeof(uintptr_t);
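  // (Illustrative: on 64-bit platforms this is 1024 / 8 == 128 words, so only
  // buckets with slot_size >= 1024 bytes take the ScanLargeArea() path below;
  // smaller buckets are scanned with ScanNormalArea().)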
903 
904   PCScanScanLoop scan_loop(*this);
905   auto& pcscan = PCScanInternal::Instance();
906 
907   StarScanSnapshot::ScanningView snapshot_view(*snapshot_);
908   snapshot_view.VisitConcurrently([this, &pcscan,
909                                    &scan_loop](uintptr_t super_page) {
910     SuperPageSnapshot super_page_snapshot(super_page);
911 
912     for (const auto& scan_area : super_page_snapshot.scan_areas()) {
913       const uintptr_t begin =
914           super_page |
915           (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
916       PA_SCAN_DCHECK(begin ==
917                      super_page + (scan_area.offset_within_page_in_words *
918                                    sizeof(uintptr_t)));
919       const uintptr_t end = begin + scan_area.size_in_words * sizeof(uintptr_t);
920 
921       if (PA_UNLIKELY(scan_area.slot_size_in_words >=
922                       kLargeScanAreaThresholdInWords)) {
923         ScanLargeArea(pcscan, scan_loop, begin, end,
924                       scan_area.slot_size_in_words * sizeof(uintptr_t));
925       } else {
926         ScanNormalArea(pcscan, scan_loop, begin, end);
927       }
928     }
929   });
930 
931   stats_.IncreaseSurvivedQuarantineSize(scan_loop.quarantine_size());
932 }
933 
934 namespace {
935 
936 struct SweepStat {
937   // Bytes that were really swept (by calling free()).
938   size_t swept_bytes = 0;
939   // Bytes of marked quarantine memory that were discarded (by calling
940   //   madvise(MADV_DONTNEED)).
941   size_t discarded_bytes = 0;
942 };
943 
944 void UnmarkInCardTable(uintptr_t slot_start, SlotSpanMetadata* slot_span) {
945 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
946   // Reset card(s) for this quarantined slot. Please note that the cards may
947   // still contain quarantined slots (which were promoted in this scan cycle),
948   // but ClearQuarantinedSlotsAndPrepareCardTable() will set them again in the
949   // next PCScan cycle.
950   QuarantineCardTable::GetFrom(slot_start)
951       .Unquarantine(slot_start, slot_span->GetUtilizedSlotSize());
952 #endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
953 }
954 
955 [[maybe_unused]] size_t FreeAndUnmarkInCardTable(PartitionRoot* root,
956                                                  SlotSpanMetadata* slot_span,
957                                                  uintptr_t slot_start) {
958   void* object = root->SlotStartToObject(slot_start);
959   root->FreeNoHooksImmediate(object, slot_span, slot_start);
960   UnmarkInCardTable(slot_start, slot_span);
961   return slot_span->bucket->slot_size;
962 }
963 
964 [[maybe_unused]] void SweepSuperPage(PartitionRoot* root,
965                                      uintptr_t super_page,
966                                      size_t epoch,
967                                      SweepStat& stat) {
968   auto* bitmap = StateBitmapFromAddr(super_page);
969   PartitionRoot::FromFirstSuperPage(super_page);
970   bitmap->IterateUnmarkedQuarantined(epoch, [root,
971                                              &stat](uintptr_t slot_start) {
972     auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
973     stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
974   });
975 }
976 
977 [[maybe_unused]] void SweepSuperPageAndDiscardMarkedQuarantine(
978     PartitionRoot* root,
979     uintptr_t super_page,
980     size_t epoch,
981     SweepStat& stat) {
982   auto* bitmap = StateBitmapFromAddr(super_page);
983   bitmap->IterateQuarantined(epoch, [root, &stat](uintptr_t slot_start,
984                                                   bool is_marked) {
985     auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
986     if (PA_LIKELY(!is_marked)) {
987       stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
988       return;
989     }
990     // Otherwise, try to discard pages for marked quarantine. Since no data is
991     // stored in quarantined slots (e.g. the |next| pointer), this can be
992     // freely done.
993     const size_t slot_size = slot_span->bucket->slot_size;
994     if (slot_size >= SystemPageSize()) {
995       const uintptr_t discard_end =
996           base::bits::AlignDown(slot_start + slot_size, SystemPageSize());
997       const uintptr_t discard_begin =
998           base::bits::AlignUp(slot_start, SystemPageSize());
999       const intptr_t discard_size = discard_end - discard_begin;
1000       if (discard_size > 0) {
1001         DiscardSystemPages(discard_begin, discard_size);
1002         stat.discarded_bytes += discard_size;
1003       }
1004     }
1005   });
1006 }
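// Illustrative arithmetic (hypothetical addresses, assuming a 4 KiB system
// page): for a marked slot occupying [0x5400, 0x7800) (slot_size == 9 KiB),
// discard_begin == 0x6000 and discard_end == 0x7000, so exactly one whole
// system page inside the slot is returned to the OS while the partially used
// head and tail pages are kept.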
1007 
1008 [[maybe_unused]] void SweepSuperPageWithBatchedFree(PartitionRoot* root,
1009                                                     uintptr_t super_page,
1010                                                     size_t epoch,
1011                                                     SweepStat& stat) {
1012   using SlotSpan = SlotSpanMetadata;
1013 
1014   auto* bitmap = StateBitmapFromAddr(super_page);
1015   SlotSpan* previous_slot_span = nullptr;
1016   internal::EncodedNextFreelistEntry* freelist_tail = nullptr;
1017   internal::EncodedNextFreelistEntry* freelist_head = nullptr;
1018   size_t freelist_entries = 0;
1019 
1020   const auto bitmap_iterator = [&](uintptr_t slot_start) {
1021     SlotSpan* current_slot_span = SlotSpan::FromSlotStart(slot_start);
1022     auto* entry = EncodedNextFreelistEntry::EmplaceAndInitNull(slot_start);
1023 
1024     if (current_slot_span != previous_slot_span) {
1025       // We started scanning a new slot span. Flush the accumulated freelist to
1026       // the slot-span's freelist. This is a single lock acquired per slot span.
1027       if (previous_slot_span && freelist_entries) {
1028         root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
1029                            previous_slot_span);
1030       }
1031       freelist_head = entry;
1032       freelist_tail = nullptr;
1033       freelist_entries = 0;
1034       previous_slot_span = current_slot_span;
1035     }
1036 
1037     if (freelist_tail) {
1038       freelist_tail->SetNext(entry);
1039     }
1040     freelist_tail = entry;
1041     ++freelist_entries;
1042 
1043     UnmarkInCardTable(slot_start, current_slot_span);
1044 
1045     stat.swept_bytes += current_slot_span->bucket->slot_size;
1046   };
1047 
1048   bitmap->IterateUnmarkedQuarantinedAndFree(epoch, bitmap_iterator);
1049 
1050   if (previous_slot_span && freelist_entries) {
1051     root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
1052                        previous_slot_span);
1053   }
1054 }
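// Illustrative cost comparison (a sketch): if one slot span holds 100 unmarked
// quarantined slots, the batched path above hands all of them to
// root->RawFreeBatch() in a single call (one lock acquisition per slot span),
// whereas the per-slot path in SweepSuperPage() would free each slot
// individually.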
1055 
1056 }  // namespace
1057 
1058 void PCScanTask::SweepQuarantine() {
1059   // Check that scan is unjoinable by this time.
1060   PA_DCHECK(!pcscan_.IsJoinable());
1061   // Discard marked quarantine memory on every Nth scan.
1062   // TODO(bikineev): Find a better signal (e.g. memory pressure, high
1063   // survival rate, etc).
1064   static constexpr size_t kDiscardMarkedQuarantineFrequency = 16;
1065   const bool should_discard =
1066       (pcscan_epoch_ % kDiscardMarkedQuarantineFrequency == 0) &&
1067       (pcscan_.clear_type_ == PCScan::ClearType::kEager);
1068 
1069   SweepStat stat;
1070   StarScanSnapshot::SweepingView sweeping_view(*snapshot_);
1071   sweeping_view.VisitNonConcurrently(
1072       [this, &stat, should_discard](uintptr_t super_page) {
1073         auto* root = PartitionRoot::FromFirstSuperPage(super_page);
1074 
1075 #if PA_CONFIG(STARSCAN_BATCHED_FREE)
1076         SweepSuperPageWithBatchedFree(root, super_page, pcscan_epoch_, stat);
1077         (void)should_discard;
1078 #else
1079         if (PA_UNLIKELY(should_discard && !root->settings.use_cookie)) {
1080           SweepSuperPageAndDiscardMarkedQuarantine(root, super_page,
1081                                                    pcscan_epoch_, stat);
1082         } else {
1083           SweepSuperPage(root, super_page, pcscan_epoch_, stat);
1084         }
1085 #endif  // PA_CONFIG(STARSCAN_BATCHED_FREE)
1086       });
1087 
1088   stats_.IncreaseSweptSize(stat.swept_bytes);
1089   stats_.IncreaseDiscardedQuarantineSize(stat.discarded_bytes);
1090 
1091 #if PA_CONFIG(THREAD_CACHE_SUPPORTED)
1092   // Sweeping potentially frees into the current thread's thread cache. Purge
1093   // releases the cache back to the global allocator.
1094   auto* current_thread_tcache = ThreadCache::Get();
1095   if (ThreadCache::IsValid(current_thread_tcache)) {
1096     current_thread_tcache->Purge();
1097   }
1098 #endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
1099 }
1100 
1101 void PCScanTask::FinishScanner() {
1102   stats_.ReportTracesAndHists(PCScanInternal::Instance().GetReporter());
1103 
1104   pcscan_.scheduler_.scheduling_backend().UpdateScheduleAfterScan(
1105       stats_.survived_quarantine_size(), stats_.GetOverallTime(),
1106       PCScanInternal::Instance().CalculateTotalHeapSize());
1107 
1108   PCScanInternal::Instance().ResetCurrentPCScanTask();
1109   // Change the state and check that a concurrent task can't be scheduled twice.
1110   PA_CHECK(pcscan_.state_.exchange(PCScan::State::kNotRunning,
1111                                    std::memory_order_acq_rel) ==
1112            PCScan::State::kSweepingAndFinishing);
1113 }
1114 
1115 void PCScanTask::RunFromMutator() {
1116   ReentrantScannerGuard reentrancy_guard;
1117   StatsCollector::MutatorScope overall_scope(
1118       stats_, StatsCollector::MutatorId::kOverall);
1119   {
1120     SyncScope<Context::kMutator> sync_scope(*this);
1121     // The mutator might start entering the safepoint after scanning has already
1122     // finished.
1123     if (!pcscan_.IsJoinable()) {
1124       return;
1125     }
1126     {
1127       // Clear all quarantined slots and prepare card table.
1128       StatsCollector::MutatorScope clear_scope(
1129           stats_, StatsCollector::MutatorId::kClear);
1130       ClearQuarantinedSlotsAndPrepareCardTable();
1131     }
1132     {
1133       // Scan the thread's stack to find dangling references.
1134       StatsCollector::MutatorScope scan_scope(
1135           stats_, StatsCollector::MutatorId::kScanStack);
1136       ScanStack();
1137     }
1138     {
1139       // Unprotect all scanned pages, if needed.
1140       UnprotectPartitions();
1141     }
1142     {
1143       // Scan heap for dangling references.
1144       StatsCollector::MutatorScope scan_scope(stats_,
1145                                               StatsCollector::MutatorId::kScan);
1146       ScanPartitions();
1147     }
1148   }
1149 }
1150 
1151 void PCScanTask::RunFromScanner() {
1152   ReentrantScannerGuard reentrancy_guard;
1153   {
1154     StatsCollector::ScannerScope overall_scope(
1155         stats_, StatsCollector::ScannerId::kOverall);
1156     {
1157       SyncScope<Context::kScanner> sync_scope(*this);
1158       {
1159         // Clear all quarantined slots and prepare the card table.
1160         StatsCollector::ScannerScope clear_scope(
1161             stats_, StatsCollector::ScannerId::kClear);
1162         ClearQuarantinedSlotsAndPrepareCardTable();
1163       }
1164       {
1165         // Scan heap for dangling references.
1166         StatsCollector::ScannerScope scan_scope(
1167             stats_, StatsCollector::ScannerId::kScan);
1168         ScanPartitions();
1169       }
1170       {
1171         // Unprotect all scanned pages, if needed.
1172         UnprotectPartitions();
1173       }
1174     }
1175     {
1176       // Sweep unreachable quarantined slots.
1177       StatsCollector::ScannerScope sweep_scope(
1178           stats_, StatsCollector::ScannerId::kSweep);
1179       SweepQuarantine();
1180     }
1181   }
1182   FinishScanner();
1183 }
1184 
1185 class PCScan::PCScanThread final {
1186  public:
1187   using TaskHandle = PCScanInternal::TaskHandle;
1188 
1189   static PCScanThread& Instance() {
1190     // Lazily instantiate the scanning thread.
1191     static internal::base::NoDestructor<PCScanThread> instance;
1192     return *instance;
1193   }
1194 
1195   void PostTask(TaskHandle task) {
1196     {
1197       std::lock_guard<std::mutex> lock(mutex_);
1198       PA_DCHECK(!posted_task_.get());
1199       posted_task_ = std::move(task);
1200       wanted_delay_ = base::TimeDelta();
1201     }
1202     condvar_.notify_one();
1203   }
1204 
1205   void PostDelayedTask(base::TimeDelta delay) {
1206     {
1207       std::lock_guard<std::mutex> lock(mutex_);
1208       if (posted_task_.get()) {
1209         return;
1210       }
1211       wanted_delay_ = delay;
1212     }
1213     condvar_.notify_one();
1214   }
1215 
1216  private:
1217   friend class internal::base::NoDestructor<PCScanThread>;
1218 
1219   PCScanThread() {
1220     ScopedAllowAllocations allow_allocations_within_std_thread;
1221     std::thread{[](PCScanThread* instance) {
1222                   static constexpr const char* kThreadName = "PCScan";
1223                   // Ideally we should avoid mixing base:: and std:: API for
1224                   // threading, but this is useful for visualizing the pcscan
1225                   // thread in chrome://tracing.
1226                   internal::base::PlatformThread::SetName(kThreadName);
1227                   instance->TaskLoop();
1228                 },
1229                 this}
1230         .detach();
1231   }
1232 
1233   // Waits and returns whether the delay should be recomputed.
1234   bool Wait(std::unique_lock<std::mutex>& lock) {
1235     PA_DCHECK(lock.owns_lock());
1236     if (wanted_delay_.is_zero()) {
1237       condvar_.wait(lock, [this] {
1238         // Re-evaluate if either delay changed, or a task was
1239         // enqueued.
1240         return !wanted_delay_.is_zero() || posted_task_.get();
1241       });
1242       // The delay has already been set up and should not be queried again.
1243       return false;
1244     }
1245     condvar_.wait_for(
1246         lock, std::chrono::microseconds(wanted_delay_.InMicroseconds()));
1247     // If no task has been posted, the delay should be recomputed at this point.
1248     return !posted_task_.get();
1249   }
1250 
1251   void TaskLoop() {
1252     while (true) {
1253       TaskHandle current_task;
1254       {
1255         std::unique_lock<std::mutex> lock(mutex_);
1256         // Scheduling.
1257         while (!posted_task_.get()) {
1258           if (Wait(lock)) {
1259             wanted_delay_ =
1260                 scheduler().scheduling_backend().UpdateDelayedSchedule();
1261             if (wanted_delay_.is_zero()) {
1262               break;
1263             }
1264           }
1265         }
1266         // Differentiate between a posted task and a delayed task schedule.
1267         if (posted_task_.get()) {
1268           std::swap(current_task, posted_task_);
1269           wanted_delay_ = base::TimeDelta();
1270         } else {
1271           PA_DCHECK(wanted_delay_.is_zero());
1272         }
1273       }
1274       // Differentiate between a posted task and a delayed task schedule.
1275       if (current_task.get()) {
1276         current_task->RunFromScanner();
1277       } else {
1278         PCScan::Instance().PerformScan(PCScan::InvocationMode::kNonBlocking);
1279       }
1280     }
1281   }
1282 
1283   PCScanScheduler& scheduler() const { return PCScan::Instance().scheduler(); }
1284 
1285   std::mutex mutex_;
1286   std::condition_variable condvar_;
1287   TaskHandle posted_task_;
1288   base::TimeDelta wanted_delay_;
1289 };
1290 
1291 PCScanInternal::PCScanInternal() : simd_support_(DetectSimdSupport()) {}
1292 
1293 PCScanInternal::~PCScanInternal() = default;
1294 
1295 void PCScanInternal::Initialize(PCScan::InitConfig config) {
1296   PA_DCHECK(!is_initialized_);
1297 #if BUILDFLAG(HAS_64_BIT_POINTERS)
1298   // Make sure that pools are initialized.
1299   PartitionAddressSpace::Init();
1300 #endif
1301   CommitCardTable();
1302 #if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
1303   if (config.write_protection ==
1304       PCScan::InitConfig::WantedWriteProtectionMode::kEnabled) {
1305     write_protector_ = std::make_unique<UserFaultFDWriteProtector>();
1306   } else {
1307     write_protector_ = std::make_unique<NoWriteProtector>();
1308   }
1309 #else
1310   write_protector_ = std::make_unique<NoWriteProtector>();
1311 #endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
1312   PCScan::SetClearType(write_protector_->SupportedClearType());
1313 
1314   if (config.safepoint == PCScan::InitConfig::SafepointMode::kEnabled) {
1315     PCScan::Instance().EnableSafepoints();
1316   }
1317   scannable_roots_ = RootsMap();
1318   nonscannable_roots_ = RootsMap();
1319 
1320   static partition_alloc::StatsReporter s_no_op_reporter;
1321   PCScan::Instance().RegisterStatsReporter(&s_no_op_reporter);
1322 
1323   // Don't initialize PCScanThread::Instance(), as otherwise the sandbox
1324   // complains about multiple threads running during sandbox initialization.
1325   is_initialized_ = true;
1326 }
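// A minimal initialization sketch (assuming InitConfig's `write_protection`
// and `safepoint` members are plain assignable fields, as their use above
// suggests; the real call site lives outside this file):
//
//   PCScan::InitConfig config;
//   config.write_protection =
//       PCScan::InitConfig::WantedWriteProtectionMode::kEnabled;
//   config.safepoint = PCScan::InitConfig::SafepointMode::kEnabled;
//   PCScanInternal::Instance().Initialize(config);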
1327 
1328 void PCScanInternal::PerformScan(PCScan::InvocationMode invocation_mode) {
1329 #if PA_SCAN_DCHECK_IS_ON()
1330   PA_DCHECK(is_initialized());
1331   PA_DCHECK(scannable_roots().size() > 0);
1332   PA_DCHECK(std::all_of(
1333       scannable_roots().begin(), scannable_roots().end(),
1334       [](const auto& pair) { return pair.first->IsScanEnabled(); }));
1335   PA_DCHECK(std::all_of(
1336       nonscannable_roots().begin(), nonscannable_roots().end(),
1337       [](const auto& pair) { return pair.first->IsQuarantineEnabled(); }));
1338 #endif
1339 
1340   PCScan& frontend = PCScan::Instance();
1341   {
1342     // If scanning is already in progress, bail out.
1343     PCScan::State expected = PCScan::State::kNotRunning;
1344     if (!frontend.state_.compare_exchange_strong(
1345             expected, PCScan::State::kScheduled, std::memory_order_acq_rel,
1346             std::memory_order_relaxed)) {
1347       return;
1348     }
1349   }
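  // Only the thread that won the kNotRunning -> kScheduled transition above
  // proceeds; any concurrent PerformScan() caller has already bailed out.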
1350 
1351   const size_t last_quarantine_size =
1352       frontend.scheduler_.scheduling_backend().ScanStarted();
1353 
1354   // Create PCScan task and set it as current.
1355   auto task = base::MakeRefCounted<PCScanTask>(frontend, last_quarantine_size);
1356   PCScanInternal::Instance().SetCurrentPCScanTask(task);
1357 
1358   if (PA_UNLIKELY(invocation_mode ==
1359                   PCScan::InvocationMode::kScheduleOnlyForTesting)) {
1360     // Immediately change the state to enable safepoint testing.
1361     frontend.state_.store(PCScan::State::kScanning, std::memory_order_release);
1362     frontend.SetJoinableIfSafepointEnabled(true);
1363     return;
1364   }
1365 
1366   // Post PCScan task.
1367   if (PA_LIKELY(invocation_mode == PCScan::InvocationMode::kNonBlocking)) {
1368     PCScan::PCScanThread::Instance().PostTask(std::move(task));
1369   } else {
1370     PA_SCAN_DCHECK(PCScan::InvocationMode::kBlocking == invocation_mode ||
1371                    PCScan::InvocationMode::kForcedBlocking == invocation_mode);
1372     std::move(*task).RunFromScanner();
1373   }
1374 }
1375 
1376 void PCScanInternal::PerformScanIfNeeded(
1377     PCScan::InvocationMode invocation_mode) {
1378   if (!scannable_roots().size()) {
1379     return;
1380   }
1381   PCScan& frontend = PCScan::Instance();
1382   if (invocation_mode == PCScan::InvocationMode::kForcedBlocking ||
1383       frontend.scheduler_.scheduling_backend()
1384           .GetQuarantineData()
1385           .MinimumScanningThresholdReached()) {
1386     PerformScan(invocation_mode);
1387   }
1388 }
1389 
1390 void PCScanInternal::PerformDelayedScan(base::TimeDelta delay) {
1391   PCScan::PCScanThread::Instance().PostDelayedTask(delay);
1392 }
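// The delayed schedule is serviced by PCScanThread::TaskLoop(): if the wanted
// delay elapses without a regular task being posted and the scheduling backend
// does not extend it, the loop starts a kNonBlocking scan.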
1393 
1394 void PCScanInternal::JoinScan() {
1395   // The current task can be destroyed by the scanner, so check that it's valid.
1396   if (auto current_task = CurrentPCScanTask()) {
1397     current_task->RunFromMutator();
1398   }
1399 }
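// The task handle is ref-counted (see MakeRefCounted in PerformScan()), so the
// copy returned by CurrentPCScanTask() keeps the task alive while the mutator
// runs it, even if the scanner drops the current task concurrently.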
1400 
1401 PCScanInternal::TaskHandle PCScanInternal::CurrentPCScanTask() const {
1402   std::lock_guard<std::mutex> lock(current_task_mutex_);
1403   return current_task_;
1404 }
1405 
1406 void PCScanInternal::SetCurrentPCScanTask(TaskHandle task) {
1407   std::lock_guard<std::mutex> lock(current_task_mutex_);
1408   current_task_ = std::move(task);
1409 }
1410 
1411 void PCScanInternal::ResetCurrentPCScanTask() {
1412   std::lock_guard<std::mutex> lock(current_task_mutex_);
1413   current_task_.reset();
1414 }
1415 
1416 namespace {
1417 PCScanInternal::SuperPages GetSuperPagesAndCommitStateBitmaps(
1418     PCScan::Root& root) {
1419   const size_t state_bitmap_size_to_commit = CommittedStateBitmapSize();
1420   PCScanInternal::SuperPages super_pages;
1421   for (auto* super_page_extent = root.first_extent; super_page_extent;
1422        super_page_extent = super_page_extent->next) {
1423     for (uintptr_t super_page = SuperPagesBeginFromExtent(super_page_extent),
1424                    super_page_end = SuperPagesEndFromExtent(super_page_extent);
1425          super_page != super_page_end; super_page += kSuperPageSize) {
1426       // Make sure the metadata is committed.
1427       // TODO(bikineev): Remove once this is known to work.
1428       const volatile char* metadata =
1429           reinterpret_cast<char*>(PartitionSuperPageToMetadataArea(super_page));
1430       *metadata;
1431       RecommitSystemPages(SuperPageStateBitmapAddr(super_page),
1432                           state_bitmap_size_to_commit,
1433                           PageAccessibilityConfiguration(
1434                               PageAccessibilityConfiguration::kReadWrite),
1435                           PageAccessibilityDisposition::kRequireUpdate);
1436       super_pages.push_back(super_page);
1437     }
1438   }
1439   return super_pages;
1440 }
1441 }  // namespace
1442 
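// Registering a root commits the state bitmaps of all of its existing super
// pages and records them in the corresponding roots map. Note that the
// partition root lock and roots_mutex_ are never held at the same time (see
// the "Avoid nesting locks" comments below).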
1443 void PCScanInternal::RegisterScannableRoot(Root* root) {
1444   PA_DCHECK(is_initialized());
1445   PA_DCHECK(root);
1446   // Avoid nesting locks and store super_pages in a temporary vector.
1447   SuperPages super_pages;
1448   {
1449     ::partition_alloc::internal::ScopedGuard guard(
1450         ::partition_alloc::internal::PartitionRootLock(root));
1451     PA_CHECK(root->IsQuarantineAllowed());
1452     if (root->IsScanEnabled()) {
1453       return;
1454     }
1455     PA_CHECK(!root->IsQuarantineEnabled());
1456     super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
1457     root->settings.scan_mode = Root::ScanMode::kEnabled;
1458     root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
1459   }
1460   std::lock_guard<std::mutex> lock(roots_mutex_);
1461   PA_DCHECK(!scannable_roots_.count(root));
1462   auto& root_super_pages = scannable_roots_[root];
1463   root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
1464                           super_pages.end());
1465 }
1466 
1467 void PCScanInternal::RegisterNonScannableRoot(Root* root) {
1468   PA_DCHECK(is_initialized());
1469   PA_DCHECK(root);
1470   // Avoid nesting locks and store super_pages in a temporary vector.
1471   SuperPages super_pages;
1472   {
1473     ::partition_alloc::internal::ScopedGuard guard(
1474         ::partition_alloc::internal::PartitionRootLock(root));
1475     PA_CHECK(root->IsQuarantineAllowed());
1476     PA_CHECK(!root->IsScanEnabled());
1477     if (root->IsQuarantineEnabled()) {
1478       return;
1479     }
1480     super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
1481     root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
1482   }
1483   std::lock_guard<std::mutex> lock(roots_mutex_);
1484   PA_DCHECK(!nonscannable_roots_.count(root));
1485   auto& root_super_pages = nonscannable_roots_[root];
1486   root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
1487                           super_pages.end());
1488 }
1489 
1490 void PCScanInternal::RegisterNewSuperPage(Root* root,
1491                                           uintptr_t super_page_base) {
1492   PA_DCHECK(is_initialized());
1493   PA_DCHECK(root);
1494   PA_CHECK(root->IsQuarantineAllowed());
1495   PA_DCHECK(!(super_page_base % kSuperPageAlignment));
1496   // Make sure the metadata is committed.
1497   // TODO(bikineev): Remove once this is known to work.
1498   const volatile char* metadata = reinterpret_cast<char*>(
1499       PartitionSuperPageToMetadataArea(super_page_base));
1500   *metadata;
1501 
1502   std::lock_guard<std::mutex> lock(roots_mutex_);
1503 
1504   // Dispatch based on whether root is scannable or not.
1505   if (root->IsScanEnabled()) {
1506     PA_DCHECK(scannable_roots_.count(root));
1507     auto& super_pages = scannable_roots_[root];
1508     PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
1509                         super_page_base) == super_pages.end());
1510     super_pages.push_back(super_page_base);
1511   } else {
1512     PA_DCHECK(root->IsQuarantineEnabled());
1513     PA_DCHECK(nonscannable_roots_.count(root));
1514     auto& super_pages = nonscannable_roots_[root];
1515     PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
1516                         super_page_base) == super_pages.end());
1517     super_pages.push_back(super_page_base);
1518   }
1519 }
1520 
1521 void PCScanInternal::SetProcessName(const char* process_name) {
1522   PA_DCHECK(is_initialized());
1523   PA_DCHECK(process_name);
1524   PA_DCHECK(!process_name_);
1525   process_name_ = process_name;
1526 }
1527 
1528 size_t PCScanInternal::CalculateTotalHeapSize() const {
1529   PA_DCHECK(is_initialized());
1530   std::lock_guard<std::mutex> lock(roots_mutex_);
1531   const auto acc = [](size_t size, const auto& pair) {
1532     return size + pair.first->get_total_size_of_committed_pages();
1533   };
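  // Accumulate with a size_t initial value so the sum is computed in size_t
  // and large committed sizes are not truncated to unsigned int.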
1534   return std::accumulate(scannable_roots_.begin(), scannable_roots_.end(),
1535                          size_t{0}, acc) +
1536          std::accumulate(nonscannable_roots_.begin(), nonscannable_roots_.end(),
1537                          size_t{0}, acc);
1538 }
1539 
1540 void PCScanInternal::EnableStackScanning() {
1541   PA_DCHECK(!stack_scanning_enabled_);
1542   stack_scanning_enabled_ = true;
1543 }
1544 void PCScanInternal::DisableStackScanning() {
1545   PA_DCHECK(stack_scanning_enabled_);
1546   stack_scanning_enabled_ = false;
1547 }
1548 bool PCScanInternal::IsStackScanningEnabled() const {
1549   return stack_scanning_enabled_;
1550 }
1551 
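// Threads taking part in stack scanning record the top of their stack when
// they start and unregister when they exit. A minimal hookup sketch (assuming
// a helper such as GetStackTop() from starscan/stack/stack.h; the real call
// sites live outside this file):
//
//   void OnScannerAwareThreadStart() {
//     PCScanInternal::Instance().NotifyThreadCreated(GetStackTop());
//   }
//   void OnScannerAwareThreadExit() {
//     PCScanInternal::Instance().NotifyThreadDestroyed();
//   }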
1552 void PCScanInternal::NotifyThreadCreated(void* stack_top) {
1553   const auto tid = base::PlatformThread::CurrentId();
1554   std::lock_guard<std::mutex> lock(stack_tops_mutex_);
1555   const auto res = stack_tops_.insert({tid, stack_top});
1556   PA_DCHECK(res.second);
1557 }
1558 
1559 void PCScanInternal::NotifyThreadDestroyed() {
1560   const auto tid = base::PlatformThread::CurrentId();
1561   std::lock_guard<std::mutex> lock(stack_tops_mutex_);
1562   PA_DCHECK(1 == stack_tops_.count(tid));
1563   stack_tops_.erase(tid);
1564 }
1565 
1566 void* PCScanInternal::GetCurrentThreadStackTop() const {
1567   const auto tid = base::PlatformThread::CurrentId();
1568   std::lock_guard<std::mutex> lock(stack_tops_mutex_);
1569   auto it = stack_tops_.find(tid);
1570   return it != stack_tops_.end() ? it->second : nullptr;
1571 }
1572 
1573 bool PCScanInternal::WriteProtectionEnabled() const {
1574   return write_protector_->IsEnabled();
1575 }
1576 
1577 void PCScanInternal::ProtectPages(uintptr_t begin, size_t size) {
1578   // Slot-span sizes are multiples of the system page size. However, the
1579   // recorded ranges are not, since the snapshot only records the used
1580   // payload. Therefore we align the incoming range up to the system page
1581   // size. The unused part of a slot-span doesn't need to be protected (the
1582   // allocator will enter the safepoint before trying to allocate from it).
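  // For example, with 4 KiB system pages a 10000-byte recorded range is
  // protected as AlignUp(10000, 4096) == 12288 bytes, i.e. three pages.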
1583   PA_SCAN_DCHECK(write_protector_.get());
1584   write_protector_->ProtectPages(
1585       begin,
1586       partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
1587 }
1588 
1589 void PCScanInternal::UnprotectPages(uintptr_t begin, size_t size) {
1590   PA_SCAN_DCHECK(write_protector_.get());
1591   write_protector_->UnprotectPages(
1592       begin,
1593       partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
1594 }
1595 
1596 void PCScanInternal::ClearRootsForTesting() {
1597   std::lock_guard<std::mutex> lock(roots_mutex_);
1598   // Set all roots as non-scannable and non-quarantinable.
1599   for (auto& pair : scannable_roots_) {
1600     Root* root = pair.first;
1601     root->settings.scan_mode = Root::ScanMode::kDisabled;
1602     root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
1603   }
1604   for (auto& pair : nonscannable_roots_) {
1605     Root* root = pair.first;
1606     root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
1607   }
1608   // Make sure to destroy maps so that on the following ReinitForTesting() call
1609   // the maps don't attempt to destroy the backing.
1610   scannable_roots_.clear();
1611   scannable_roots_.~RootsMap();
1612   nonscannable_roots_.clear();
1613   nonscannable_roots_.~RootsMap();
1614   // Destroy the write protector object so that there is no double free on the
1615   // next call to ReinitForTesting().
1616   write_protector_.reset();
1617 }
1618 
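// ReinitForTesting() reuses the storage of the existing object: it
// placement-news a fresh PCScanInternal over *this (which is why
// ClearRootsForTesting() above runs the map destructors and resets the write
// protector manually) and then initializes the new instance.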
1619 void PCScanInternal::ReinitForTesting(PCScan::InitConfig config) {
1620   is_initialized_ = false;
1621   auto* new_this = new (this) PCScanInternal;
1622   new_this->Initialize(config);
1623 }
1624 
1625 void PCScanInternal::FinishScanForTesting() {
1626   auto current_task = CurrentPCScanTask();
1627   PA_CHECK(current_task.get());
1628   current_task->RunFromScanner();
1629 }
1630 
1631 void PCScanInternal::RegisterStatsReporter(
1632     partition_alloc::StatsReporter* reporter) {
1633   PA_DCHECK(reporter);
1634   stats_reporter_ = reporter;
1635 }
1636 
1637 partition_alloc::StatsReporter& PCScanInternal::GetReporter() {
1638   PA_DCHECK(stats_reporter_);
1639   return *stats_reporter_;
1640 }
1641 
1642 }  // namespace partition_alloc::internal
1643