1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
6 
7 #include <algorithm>
8 #include <array>
9 #include <chrono>
10 #include <condition_variable>
11 #include <cstdint>
12 #include <mutex>
13 #include <numeric>
14 #include <set>
15 #include <thread>
16 #include <type_traits>
17 #include <unordered_map>
18 #include <vector>
19 
20 #include "base/allocator/partition_allocator/address_pool_manager.h"
21 #include "base/allocator/partition_allocator/allocation_guard.h"
22 #include "base/allocator/partition_allocator/page_allocator.h"
23 #include "base/allocator/partition_allocator/page_allocator_constants.h"
24 #include "base/allocator/partition_allocator/partition_address_space.h"
25 #include "base/allocator/partition_allocator/partition_alloc.h"
26 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
27 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
28 #include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
29 #include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
30 #include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
31 #include "base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h"
32 #include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h"
33 #include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
34 #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
35 #include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
36 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
37 #include "base/allocator/partition_allocator/partition_alloc_check.h"
38 #include "base/allocator/partition_allocator/partition_alloc_config.h"
39 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
40 #include "base/allocator/partition_allocator/partition_page.h"
41 #include "base/allocator/partition_allocator/reservation_offset_table.h"
42 #include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
43 #include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
44 #include "base/allocator/partition_allocator/starscan/raceful_worklist.h"
45 #include "base/allocator/partition_allocator/starscan/scan_loop.h"
46 #include "base/allocator/partition_allocator/starscan/snapshot.h"
47 #include "base/allocator/partition_allocator/starscan/stack/stack.h"
48 #include "base/allocator/partition_allocator/starscan/stats_collector.h"
49 #include "base/allocator/partition_allocator/starscan/stats_reporter.h"
50 #include "base/allocator/partition_allocator/tagging.h"
51 #include "base/allocator/partition_allocator/thread_cache.h"
52 #include "build/build_config.h"
53 
54 #if !BUILDFLAG(HAS_64_BIT_POINTERS)
55 #include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
56 #endif
57 
58 #if PA_CONFIG(STARSCAN_NOINLINE_SCAN_FUNCTIONS)
59 #define PA_SCAN_INLINE PA_NOINLINE
60 #else
61 #define PA_SCAN_INLINE PA_ALWAYS_INLINE
62 #endif
63 
64 namespace partition_alloc::internal {
65 
66 [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void DoubleFreeAttempt() {
67   PA_NO_CODE_FOLDING();
68   PA_IMMEDIATE_CRASH();
69 }
70 
71 namespace {
72 
73 #if PA_CONFIG(HAS_ALLOCATION_GUARD)
74 // Currently, reentrancy is checked only on Linux. On Android, TLS is emulated
75 // by the runtime lib, which can allocate and therefore cause reentrancy.
76 struct ReentrantScannerGuard final {
77  public:
78   ReentrantScannerGuard() {
79     PA_CHECK(!guard_);
80     guard_ = true;
81   }
82   ~ReentrantScannerGuard() { guard_ = false; }
83 
84  private:
85   // Since this variable has hidden visibility (not referenced by other DSOs),
86   // assume that thread_local works on all supported architectures.
87   static thread_local size_t guard_;
88 };
89 thread_local size_t ReentrantScannerGuard::guard_ = 0;
90 #else
91 struct [[maybe_unused]] ReentrantScannerGuard final {};
92 #endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
93 
94 // Scope that disables MTE checks. Only used inside scanning to avoid the race:
95 // a slot tag is changed by the mutator, while the scanner sees an old value.
96 struct DisableMTEScope final {
97   DisableMTEScope() {
98     ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
99         ::partition_alloc::TagViolationReportingMode::kDisabled);
100   }
101   ~DisableMTEScope() {
102     ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
103         parent_tagging_mode);
104   }
105 
106  private:
107   ::partition_alloc::TagViolationReportingMode parent_tagging_mode =
108       ::partition_alloc::internal::GetMemoryTaggingModeForCurrentThread();
109 };
110 
111 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
112 // Bytemap that represents regions (cards) that contain quarantined slots.
113 // A single PCScan cycle consists of the following steps:
114 // 1) clearing (memset quarantine + marking cards that contain quarantine);
115 // 2) scanning;
116 // 3) sweeping (freeing + unmarking cards that contain freed slots).
117 // Marking cards in step 1) ensures that the card table stays in a consistent
118 // state while scanning. Unmarking in step 3) ensures that unmarking
119 // actually happens (so we don't hit too many false positives).
120 //
121 // The code here relies on the fact that |address| is in the regular pool and
122 // that the card table (this object) is allocated at the very beginning of that
123 // pool.
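//
// A rough worked example of the card mapping (illustrative only; the concrete
// numbers depend on the pool configuration, here a 16 GiB regular pool with
// 2 MiB super pages is assumed): kCardSize = 16 GiB / 2 MiB = 8 KiB, so each
// card covers 8 KiB of pool memory, and the table holds
// kPoolMaxSize / kCardSize = kSuperPageSize one-byte entries, i.e. the whole
// table always fits within a single super page, which is exactly what the
// static_assert below the class checks.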
124 class QuarantineCardTable final {
125  public:
126   // Avoid the load of the base of the regular pool.
127   PA_ALWAYS_INLINE static QuarantineCardTable& GetFrom(uintptr_t address) {
128     PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(address));
129     return *reinterpret_cast<QuarantineCardTable*>(
130         address & PartitionAddressSpace::RegularPoolBaseMask());
131   }
132 
133   PA_ALWAYS_INLINE void Quarantine(uintptr_t begin, size_t size) {
134     return SetImpl(begin, size, true);
135   }
136 
137   PA_ALWAYS_INLINE void Unquarantine(uintptr_t begin, size_t size) {
138     return SetImpl(begin, size, false);
139   }
140 
141   // Returns whether the card to which |address| points contains quarantined
142   // slots. May return false positives, but should never return false
143   // negatives, as otherwise this breaks security.
144   PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const {
145     const size_t byte = Byte(address);
146     PA_SCAN_DCHECK(byte < bytes_.size());
147     return bytes_[byte];
148   }
149 
150  private:
151   static constexpr size_t kCardSize = kPoolMaxSize / kSuperPageSize;
152   static constexpr size_t kBytes = kPoolMaxSize / kCardSize;
153 
154   QuarantineCardTable() = default;
155 
156   PA_ALWAYS_INLINE static size_t Byte(uintptr_t address) {
157     return (address & ~PartitionAddressSpace::RegularPoolBaseMask()) /
158            kCardSize;
159   }
160 
161   PA_ALWAYS_INLINE void SetImpl(uintptr_t begin, size_t size, bool value) {
162     const size_t byte = Byte(begin);
163     const size_t need_bytes = (size + (kCardSize - 1)) / kCardSize;
164     PA_SCAN_DCHECK(bytes_.size() >= byte + need_bytes);
165     PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(begin));
166     for (size_t i = byte; i < byte + need_bytes; ++i) {
167       bytes_[i] = value;
168     }
169   }
170 
171   std::array<bool, kBytes> bytes_;
172 };
173 static_assert(kSuperPageSize >= sizeof(QuarantineCardTable),
174               "Card table size must be less than kSuperPageSize, since this is "
175               "what is committed");
176 #endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
177 
178 template <typename T>
179 using MetadataVector = std::vector<T, MetadataAllocator<T>>;
180 template <typename T>
181 using MetadataSet = std::set<T, std::less<>, MetadataAllocator<T>>;
182 template <typename K, typename V>
183 using MetadataHashMap =
184     std::unordered_map<K,
185                        V,
186                        std::hash<K>,
187                        std::equal_to<>,
188                        MetadataAllocator<std::pair<const K, V>>>;
189 
190 struct GetSlotStartResult final {
191   PA_ALWAYS_INLINE bool is_found() const {
192     PA_SCAN_DCHECK(!slot_start || slot_size);
193     return slot_start;
194   }
195 
196   uintptr_t slot_start = 0;
197   size_t slot_size = 0;
198 };
199 
200 // Returns the start of a slot, or 0 if |maybe_inner_address| is not inside
201 // an existing slot span. The function may return a non-0 address even inside a
202 // decommitted or free slot span; it's the caller's responsibility to check
203 // whether the memory is actually allocated.
204 //
205 // |maybe_inner_address| must be within a normal-bucket super page and can also
206 // point to guard pages or slot-span metadata.
207 PA_SCAN_INLINE GetSlotStartResult
208 GetSlotStartInSuperPage(uintptr_t maybe_inner_address) {
209   PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_inner_address));
210   // Don't use SlotSpanMetadata/PartitionPage::FromAddr() and family, because
211   // they expect an address within a super page payload area, and we don't yet
212   // know whether |maybe_inner_address| is one.
213   const uintptr_t super_page = maybe_inner_address & kSuperPageBaseMask;
214 
215   const uintptr_t partition_page_index =
216       (maybe_inner_address & kSuperPageOffsetMask) >> PartitionPageShift();
217   auto* page = PartitionSuperPageToMetadataArea<ThreadSafe>(super_page) +
218                partition_page_index;
219   // Check if page is valid. The check also works for the guard pages and the
220   // metadata page.
221   if (!page->is_valid) {
222     return {};
223   }
224 
225   page -= page->slot_span_metadata_offset;
226   PA_SCAN_DCHECK(page->is_valid);
227   PA_SCAN_DCHECK(!page->slot_span_metadata_offset);
228   auto* slot_span = &page->slot_span_metadata;
229   // Check if the slot span is actually used and valid.
230   if (!slot_span->bucket) {
231     return {};
232   }
233   PA_SCAN_DCHECK(PartitionRoot<ThreadSafe>::IsValidSlotSpan(slot_span));
234   const uintptr_t slot_span_start =
235       SlotSpanMetadata<ThreadSafe>::ToSlotSpanStart(slot_span);
236   const ptrdiff_t ptr_offset = maybe_inner_address - slot_span_start;
237   PA_SCAN_DCHECK(0 <= ptr_offset &&
238                  ptr_offset < static_cast<ptrdiff_t>(
239                                   slot_span->bucket->get_pages_per_slot_span() *
240                                   PartitionPageSize()));
241   // The slot span size in bytes is not necessarily a multiple of the partition
242   // page size. Don't check whether the pointer points outside the usable area,
243   // since checking the quarantine bit will return false in this case anyway.
244   const size_t slot_size = slot_span->bucket->slot_size;
245   const size_t slot_number = slot_span->bucket->GetSlotNumber(ptr_offset);
246   const uintptr_t slot_start = slot_span_start + (slot_number * slot_size);
247   PA_SCAN_DCHECK(slot_start <= maybe_inner_address &&
248                  maybe_inner_address < slot_start + slot_size);
249   return {.slot_start = slot_start, .slot_size = slot_size};
250 }
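// Illustrative example of the slot-start arithmetic above (hypothetical
// numbers, not taken from any real configuration): for a slot span starting
// at slot_span_start with slot_size == 48, an inner address at
// ptr_offset == 100 gives slot_number == 100 / 48 == 2 and
// slot_start == slot_span_start + 2 * 48, i.e. the returned slot covers
// [slot_span_start + 96, slot_span_start + 144).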
251 
252 #if PA_SCAN_DCHECK_IS_ON()
253 bool IsQuarantineEmptyOnSuperPage(uintptr_t super_page) {
254   auto* bitmap = SuperPageStateBitmap(super_page);
255   size_t visited = 0;
256   bitmap->IterateQuarantined([&visited](auto) { ++visited; });
257   return !visited;
258 }
259 #endif
260 
261 SimdSupport DetectSimdSupport() {
262 #if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
263   return SimdSupport::kNEON;
264 #else
265   const base::CPU& cpu = base::CPU::GetInstanceNoAllocation();
266   if (cpu.has_avx2()) {
267     return SimdSupport::kAVX2;
268   }
269   if (cpu.has_sse41()) {
270     return SimdSupport::kSSE41;
271   }
272   return SimdSupport::kUnvectorized;
273 #endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
274 }
275 
276 void CommitCardTable() {
277 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
278   RecommitSystemPages(PartitionAddressSpace::RegularPoolBase(),
279                       sizeof(QuarantineCardTable),
280                       PageAccessibilityConfiguration(
281                           PageAccessibilityConfiguration::kReadWrite),
282                       PageAccessibilityDisposition::kRequireUpdate);
283 #endif
284 }
285 
286 template <class Function>
287 void IterateNonEmptySlotSpans(uintptr_t super_page,
288                               size_t nonempty_slot_spans,
289                               Function function) {
290   PA_SCAN_DCHECK(!(super_page % kSuperPageAlignment));
291   PA_SCAN_DCHECK(nonempty_slot_spans);
292 
293   size_t slot_spans_to_visit = nonempty_slot_spans;
294 #if PA_SCAN_DCHECK_IS_ON()
295   size_t visited = 0;
296 #endif
297 
298   IterateSlotSpans<ThreadSafe>(
299       super_page, true /*with_quarantine*/,
300       [&function, &slot_spans_to_visit
301 #if PA_SCAN_DCHECK_IS_ON()
302        ,
303        &visited
304 #endif
305   ](SlotSpanMetadata<ThreadSafe>* slot_span) {
306         if (slot_span->is_empty() || slot_span->is_decommitted()) {
307           // Skip empty/decommitted slot spans.
308           return false;
309         }
310         function(slot_span);
311         --slot_spans_to_visit;
312 #if PA_SCAN_DCHECK_IS_ON()
313         // In debug builds, scan all the slot spans to check that the number
314         // of visited slot spans equals nonempty_slot_spans.
315         ++visited;
316         return false;
317 #else
318         return slot_spans_to_visit == 0;
319 #endif
320       });
321 #if PA_SCAN_DCHECK_IS_ON()
322   // Check that all non-empty slot spans have been visited.
323   PA_DCHECK(nonempty_slot_spans == visited);
324 #endif
325 }
326 
327 // SuperPageSnapshot is used to record all slot spans that contain live slots.
328 // The class avoids dynamic allocations and is designed to be instantiated on
329 // stack. To avoid stack overflow, internal data structures are kept packed.
330 class SuperPageSnapshot final {
331   // The following constants are used to define a conservative estimate for the
332   // maximum number of slot spans in a super page.
333   //
334   // For systems with a runtime-defined page size, assume the partition page
335   // size is at least 16 KiB.
336   static constexpr size_t kMinPartitionPageSize =
337       __builtin_constant_p(PartitionPageSize()) ? PartitionPageSize() : 1 << 14;
338   static constexpr size_t kStateBitmapMinReservedSize =
339       __builtin_constant_p(ReservedStateBitmapSize())
340           ? ReservedStateBitmapSize()
341           : partition_alloc::internal::base::bits::AlignUp(
342                 sizeof(AllocationStateMap),
343                 kMinPartitionPageSize);
344   // Take into account guard partition page at the end of super-page.
345   static constexpr size_t kGuardPagesSize = 2 * kMinPartitionPageSize;
346 
347   static constexpr size_t kPayloadMaxSize =
348       kSuperPageSize - kStateBitmapMinReservedSize - kGuardPagesSize;
349   static_assert(kPayloadMaxSize % kMinPartitionPageSize == 0,
350                 "kPayloadMaxSize must be multiple of kMinPartitionPageSize");
351 
352   static constexpr size_t kMaxSlotSpansInSuperPage =
353       kPayloadMaxSize / kMinPartitionPageSize;
354 
355  public:
356   struct ScanArea {
357     // Use packed integer types to save stack space. In theory, kAlignment could
358     // be used instead of words, but it doesn't seem to bring savings.
359     uint32_t offset_within_page_in_words;
360     uint32_t size_in_words;
361     uint32_t slot_size_in_words;
362   };
363 
364   class ScanAreas : private std::array<ScanArea, kMaxSlotSpansInSuperPage> {
365     using Base = std::array<ScanArea, kMaxSlotSpansInSuperPage>;
366 
367    public:
368     using iterator = Base::iterator;
369     using const_iterator = Base::const_iterator;
370     using Base::operator[];
371 
372     iterator begin() { return Base::begin(); }
373     const_iterator begin() const { return Base::begin(); }
374 
375     iterator end() { return std::next(begin(), size_); }
376     const_iterator end() const { return std::next(begin(), size_); }
377 
378     void set_size(size_t new_size) { size_ = new_size; }
379 
380    private:
381     size_t size_;
382   };
383 
384   static_assert(std::is_trivially_default_constructible<ScanAreas>::value,
385                 "ScanAreas must be trivially default constructible to ensure "
386                 "that no memsets are generated by the compiler as a "
387                 "result of value-initialization (or zero-initialization)");
388 
389   void* operator new(size_t) = delete;
390   void operator delete(void*) = delete;
391 
392   // Creates a snapshot for a single super page. In theory, we could simply
393   // iterate over slot spans without taking a snapshot. However, we do this to
394   // minimize the mutex locking time. The mutex must be acquired to make sure
395   // that no mutator is concurrently changing any of the slot spans.
396   explicit SuperPageSnapshot(uintptr_t super_page_base);
397 
398   const ScanAreas& scan_areas() const { return scan_areas_; }
399 
400  private:
401   ScanAreas scan_areas_;
402 };
403 
404 static_assert(
405     sizeof(SuperPageSnapshot) <= 2048,
406     "SuperPageSnapshot must stay relatively small to be allocated on stack");
407 
408 SuperPageSnapshot::SuperPageSnapshot(uintptr_t super_page) {
409   using SlotSpan = SlotSpanMetadata<ThreadSafe>;
410 
411   auto* extent_entry = PartitionSuperPageToExtent<ThreadSafe>(super_page);
412 
413   ::partition_alloc::internal::ScopedGuard lock(extent_entry->root->lock_);
414 
415   const size_t nonempty_slot_spans =
416       extent_entry->number_of_nonempty_slot_spans;
417   if (!nonempty_slot_spans) {
418 #if PA_SCAN_DCHECK_IS_ON()
419     // Check that quarantine bitmap is empty for super-pages that contain
420     // only empty/decommitted slot-spans.
421     PA_CHECK(IsQuarantineEmptyOnSuperPage(super_page));
422 #endif
423     scan_areas_.set_size(0);
424     return;
425   }
426 
427   size_t current = 0;
428 
429   IterateNonEmptySlotSpans(
430       super_page, nonempty_slot_spans, [this, &current](SlotSpan* slot_span) {
431         const uintptr_t payload_begin = SlotSpan::ToSlotSpanStart(slot_span);
432         // For single-slot slot-spans, scan only utilized slot part.
433         const size_t provisioned_size =
434             PA_UNLIKELY(slot_span->CanStoreRawSize())
435                 ? slot_span->GetRawSize()
436                 : slot_span->GetProvisionedSize();
437         // Free & decommitted slot spans are skipped.
438         PA_SCAN_DCHECK(provisioned_size > 0);
439         const uintptr_t payload_end = payload_begin + provisioned_size;
440         auto& area = scan_areas_[current];
441 
442         const size_t offset_in_words =
443             (payload_begin & kSuperPageOffsetMask) / sizeof(uintptr_t);
444         const size_t size_in_words =
445             (payload_end - payload_begin) / sizeof(uintptr_t);
446         const size_t slot_size_in_words =
447             slot_span->bucket->slot_size / sizeof(uintptr_t);
448 
449 #if PA_SCAN_DCHECK_IS_ON()
450         PA_DCHECK(offset_in_words <=
451                   std::numeric_limits<
452                       decltype(area.offset_within_page_in_words)>::max());
453         PA_DCHECK(size_in_words <=
454                   std::numeric_limits<decltype(area.size_in_words)>::max());
455         PA_DCHECK(
456             slot_size_in_words <=
457             std::numeric_limits<decltype(area.slot_size_in_words)>::max());
458 #endif
459 
460         area.offset_within_page_in_words = offset_in_words;
461         area.size_in_words = size_in_words;
462         area.slot_size_in_words = slot_size_in_words;
463 
464         ++current;
465       });
466 
467   PA_SCAN_DCHECK(kMaxSlotSpansInSuperPage >= current);
468   scan_areas_.set_size(current);
469 }
470 
471 }  // namespace
472 
473 class PCScanScanLoop;
474 
475 // This class is responsible for performing the entire PCScan task.
476 // TODO(bikineev): Move PCScan algorithm out of PCScanTask.
477 class PCScanTask final : public base::RefCountedThreadSafe<PCScanTask>,
478                          public AllocatedOnPCScanMetadataPartition {
479  public:
480   // Creates and initializes a PCScan state.
481   PCScanTask(PCScan& pcscan, size_t quarantine_last_size);
482 
483   PCScanTask(PCScanTask&&) noexcept = delete;
484   PCScanTask& operator=(PCScanTask&&) noexcept = delete;
485 
486   // Execute PCScan from mutator inside safepoint.
487   void RunFromMutator();
488 
489   // Execute PCScan from the scanner thread. Must be called only once from the
490   // scanner thread.
491   void RunFromScanner();
492 
493   PCScanScheduler& scheduler() const { return pcscan_.scheduler(); }
494 
495  private:
496   class StackVisitor;
497   friend class PCScanScanLoop;
498 
499   using Root = PCScan::Root;
500   using SlotSpan = SlotSpanMetadata<ThreadSafe>;
501 
502   // This is used:
503   // - to synchronize all scanning threads (mutators and the scanner);
504   // - for the scanner, to transition through the state machine
505   //   (kScheduled -> kScanning (ctor) -> kSweepingAndFinishing (dtor)).
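  //
  // Roughly (illustrative sketch of the synchronization below, not an
  // exhaustive state diagram): the scanner's SyncScope ctor moves the state
  // to kScanning and makes the task joinable; every scanning thread
  // increments number_of_scanning_threads_ on entry and decrements it on
  // exit; the scanner's dtor waits until the counter drops to zero and only
  // then moves the state to kSweepingAndFinishing.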
506   template <Context context>
507   class SyncScope final {
508    public:
509     explicit SyncScope(PCScanTask& task) : task_(task) {
510       task_.number_of_scanning_threads_.fetch_add(1, std::memory_order_relaxed);
511       if (context == Context::kScanner) {
512         task_.pcscan_.state_.store(PCScan::State::kScanning,
513                                    std::memory_order_relaxed);
514         task_.pcscan_.SetJoinableIfSafepointEnabled(true);
515       }
516     }
517     ~SyncScope() {
518       // First, notify the scanning thread that this thread is done.
519       NotifyThreads();
520       if (context == Context::kScanner) {
521         // The scanner thread must wait here until all safepoints leave.
522         // Otherwise, sweeping may free a page that can later be accessed by a
523         // descheduled mutator.
524         WaitForOtherThreads();
525         task_.pcscan_.state_.store(PCScan::State::kSweepingAndFinishing,
526                                    std::memory_order_relaxed);
527       }
528     }
529 
530    private:
531     void NotifyThreads() {
532       {
533         // The lock is required as otherwise there is a race between
534         // fetch_sub/notify in the mutator and checking
535         // number_of_scanning_threads_/waiting in the scanner.
536         std::lock_guard<std::mutex> lock(task_.mutex_);
537         task_.number_of_scanning_threads_.fetch_sub(1,
538                                                     std::memory_order_relaxed);
539         {
540           // Notify that the scan is done and there is no need to enter
541           // the safepoint. This also helps a mutator avoid entering it
542           // repeatedly. Since the scanner thread waits for all threads to
543           // finish, there is no ABA problem here.
544           task_.pcscan_.SetJoinableIfSafepointEnabled(false);
545         }
546       }
547       task_.condvar_.notify_all();
548     }
549 
550     void WaitForOtherThreads() {
551       std::unique_lock<std::mutex> lock(task_.mutex_);
552       task_.condvar_.wait(lock, [this] {
553         return !task_.number_of_scanning_threads_.load(
554             std::memory_order_relaxed);
555       });
556     }
557 
558     PCScanTask& task_;
559   };
560 
561   friend class base::RefCountedThreadSafe<PCScanTask>;
562   ~PCScanTask() = default;
563 
564   PA_SCAN_INLINE AllocationStateMap* TryFindScannerBitmapForPointer(
565       uintptr_t maybe_ptr) const;
566 
567   // Lookup and marking functions. Return the size of the slot if marked, or
568   // zero otherwise.
569   PA_SCAN_INLINE size_t TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const;
570 
571   // Scans stack, only called from safepoints.
572   void ScanStack();
573 
574   // Scan individual areas.
575   void ScanNormalArea(PCScanInternal& pcscan,
576                       PCScanScanLoop& scan_loop,
577                       uintptr_t begin,
578                       uintptr_t end);
579   void ScanLargeArea(PCScanInternal& pcscan,
580                      PCScanScanLoop& scan_loop,
581                      uintptr_t begin,
582                      uintptr_t end,
583                      size_t slot_size);
584 
585   // Scans all registered partitions and marks reachable quarantined slots.
586   void ScanPartitions();
587 
588   // Clear quarantined slots and prepare the card table for fast lookup.
589   void ClearQuarantinedSlotsAndPrepareCardTable();
590 
591   // Unprotect all slot spans from all partitions.
592   void UnprotectPartitions();
593 
594   // Sweeps (frees) unreachable quarantined entries.
595   void SweepQuarantine();
596 
597   // Finishes the scanner (updates limits, UMA, etc).
598   void FinishScanner();
599 
600   // Cache the pcscan epoch to avoid the compiler loading the atomic
601   // QuarantineData::epoch_ on each access.
602   const size_t pcscan_epoch_;
603   std::unique_ptr<StarScanSnapshot> snapshot_;
604   StatsCollector stats_;
605   // Mutex and condvar that are used to synchronize scanning threads.
606   std::mutex mutex_;
607   std::condition_variable condvar_;
608   std::atomic<size_t> number_of_scanning_threads_{0u};
609   // We can unprotect only once to reduce context-switches.
610   std::once_flag unprotect_once_flag_;
611   bool immediatelly_free_slots_{false};
612   PCScan& pcscan_;
613 };
614 
615 PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
616     uintptr_t maybe_ptr) const {
617   PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
618   // First, check if |maybe_ptr| points to a valid super page or a quarantined
619   // card.
620 #if BUILDFLAG(HAS_64_BIT_POINTERS)
621 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
622   // Check if |maybe_ptr| points to a quarantined card.
623   if (PA_LIKELY(
624           !QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
625     return nullptr;
626   }
627 #else   // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
628   // Without the card table, use the reservation offset table to check if
629   // |maybe_ptr| points to a valid super-page. It's not as precise (meaning that
630   // we may hit the slow path more frequently), but it reduces the memory
631   // overhead. Since we are certain here that |maybe_ptr| refers to the
632   // regular pool, it's okay to use the non-checking version of
633   // ReservationOffsetPointer().
634   const uintptr_t offset =
635       maybe_ptr & ~PartitionAddressSpace::RegularPoolBaseMask();
636   if (PA_LIKELY(*ReservationOffsetPointer(kRegularPoolHandle, offset) !=
637                 kOffsetTagNormalBuckets)) {
638     return nullptr;
639   }
640 #endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
641 #else   // BUILDFLAG(HAS_64_BIT_POINTERS)
642   if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
643     return nullptr;
644   }
645 #endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
646 
647   // We are certain here that |maybe_ptr| points to an allocated super-page.
648   return StateBitmapFromAddr(maybe_ptr);
649 }
650 
651 // Looks up and marks a potential dangling pointer. Returns the size of the slot
652 // (which is then accounted as quarantined), or zero if no slot is found.
653 // For normal bucket super pages, PCScan uses two quarantine bitmaps, the
654 // mutator and the scanner one. The former is used by mutators when slots are
655 // freed, while the latter is used concurrently by the PCScan thread. The
656 // bitmaps are swapped as soon as PCScan is triggered. Once a dangling pointer
657 // (which points to a slot in the scanner bitmap) is found,
658 // TryMarkSlotInNormalBuckets() marks it again in the mutator bitmap and clears
659 // it from the scanner bitmap. This way, when scanning is done, all uncleared
660 // entries in the scanner bitmap correspond to unreachable slots.
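// A rough timeline of the scheme above (illustrative sketch only):
//   before the scan: a mutator frees slot S -> S is set in the mutator bitmap;
//   scan starts:     the bitmaps swap       -> S now lives in the scanner
//                                              bitmap;
//   scan finds a pointer into S             -> S is re-marked in the mutator
//                                              bitmap and cleared from the
//                                              scanner bitmap;
//   scan ends:       entries still left in the scanner bitmap are unreachable
//                    and are freed by the sweeper.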
661 PA_SCAN_INLINE size_t
662 PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const {
663   // Check if |maybe_ptr| points somewhere to the heap.
664   // The caller has to make sure that |maybe_ptr| isn't MTE-tagged.
665   auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr);
666   if (!state_map) {
667     return 0;
668   }
669 
670   // Beyond this point, we know that |maybe_ptr| is a pointer within a
671   // normal-bucket super page.
672   PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_ptr));
673 
674 #if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
675   // A pointer from a normal bucket is always in the first superpage.
676   auto* root = Root::FromAddrInFirstSuperpage(maybe_ptr);
677   // Without the card table, we must make sure that |maybe_ptr| doesn't point to
678   // metadata partition.
679   // TODO(bikineev): To speed things up, consider removing the check and
680   // committing quarantine bitmaps for metadata partition.
681   // TODO(bikineev): Marking an entry in the reservation-table is not a
682   // publishing operation, meaning that the |root| pointer may not be assigned
683   // yet. This can happen as arbitrary pointers may point into a super-page
684   // during its set up. Make sure to check |root| is not null before
685   // dereferencing it.
686   if (PA_UNLIKELY(!root || !root->IsQuarantineEnabled())) {
687     return 0;
688   }
689 #endif  // !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
690 
691   // Check if pointer was in the quarantine bitmap.
692   const GetSlotStartResult slot_start_result =
693       GetSlotStartInSuperPage(maybe_ptr);
694   if (!slot_start_result.is_found()) {
695     return 0;
696   }
697 
698   const uintptr_t slot_start = slot_start_result.slot_start;
699   if (PA_LIKELY(!state_map->IsQuarantined(slot_start))) {
700     return 0;
701   }
702 
703   PA_SCAN_DCHECK((maybe_ptr & kSuperPageBaseMask) ==
704                  (slot_start & kSuperPageBaseMask));
705 
706   if (PA_UNLIKELY(immediatelly_free_slots_)) {
707     return 0;
708   }
709 
710   // Now we are certain that |maybe_ptr| is a dangling pointer. Mark it again in
711   // the mutator bitmap and clear from the scanner bitmap. Note that since
712   // PCScan has exclusive access to the scanner bitmap, we can avoid atomic rmw
713   // operation for it.
714   if (PA_LIKELY(
715           state_map->MarkQuarantinedAsReachable(slot_start, pcscan_epoch_))) {
716     return slot_start_result.slot_size;
717   }
718 
719   return 0;
720 }
721 
722 void PCScanTask::ClearQuarantinedSlotsAndPrepareCardTable() {
723   const PCScan::ClearType clear_type = pcscan_.clear_type_;
724 
725 #if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
726   if (clear_type == PCScan::ClearType::kEager) {
727     return;
728   }
729 #endif
730 
731   StarScanSnapshot::ClearingView view(*snapshot_);
732   view.VisitConcurrently([clear_type](uintptr_t super_page) {
733     auto* bitmap = StateBitmapFromAddr(super_page);
734     auto* root = Root::FromFirstSuperPage(super_page);
735     bitmap->IterateQuarantined([root, clear_type](uintptr_t slot_start) {
736       auto* slot_span = SlotSpan::FromSlotStart(slot_start);
737       // Use zero as a zapping value to speed up the fast bailout check in
738       // ScanPartitions.
739       const size_t size = slot_span->GetUsableSize(root);
740       if (clear_type == PCScan::ClearType::kLazy) {
741         void* object = root->SlotStartToObject(slot_start);
742         memset(object, 0, size);
743       }
744 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
745       // Set card(s) for this quarantined slot.
746       QuarantineCardTable::GetFrom(slot_start).Quarantine(slot_start, size);
747 #endif
748     });
749   });
750 }
751 
752 void PCScanTask::UnprotectPartitions() {
753   auto& pcscan = PCScanInternal::Instance();
754   if (!pcscan.WriteProtectionEnabled()) {
755     return;
756   }
757 
758   StarScanSnapshot::UnprotectingView unprotect_view(*snapshot_);
759   unprotect_view.VisitConcurrently([&pcscan](uintptr_t super_page) {
760     SuperPageSnapshot super_page_snapshot(super_page);
761 
762     for (const auto& scan_area : super_page_snapshot.scan_areas()) {
763       const uintptr_t begin =
764           super_page |
765           (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
766       const uintptr_t end =
767           begin + (scan_area.size_in_words * sizeof(uintptr_t));
768 
769       pcscan.UnprotectPages(begin, end - begin);
770     }
771   });
772 }
773 
774 class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
775   friend class ScanLoop<PCScanScanLoop>;
776 
777  public:
778   explicit PCScanScanLoop(const PCScanTask& task)
779       : ScanLoop(PCScanInternal::Instance().simd_support()), task_(task) {}
780 
781   size_t quarantine_size() const { return quarantine_size_; }
782 
783  private:
784 #if BUILDFLAG(HAS_64_BIT_POINTERS)
785   PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
786     return PartitionAddressSpace::RegularPoolBase();
787   }
788   PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
789     return PartitionAddressSpace::RegularPoolBaseMask();
790   }
791 #endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
792 
793   PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
794     // |maybe_ptr| may have an MTE tag, so remove it first.
795     quarantine_size_ +=
796         task_.TryMarkSlotInNormalBuckets(UntagAddr(maybe_ptr_maybe_tagged));
797   }
798 
799   const PCScanTask& task_;
800   DisableMTEScope disable_mte_;
801   size_t quarantine_size_ = 0;
802 };
803 
804 class PCScanTask::StackVisitor final : public internal::StackVisitor {
805  public:
806   explicit StackVisitor(const PCScanTask& task) : task_(task) {}
807 
808   void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) override {
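    // Conservatively round the scanned range outwards to kMinimalAlignment:
    // |stack_ptr| is rounded down and |stack_top| is rounded up, so the
    // vectorized scan loop below always runs over whole aligned chunks.
    // (Descriptive note added for clarity; the 32-byte value presumably
    // matches the widest SIMD lane used by ScanLoop.)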
809     static constexpr size_t kMinimalAlignment = 32;
810     uintptr_t begin =
811         reinterpret_cast<uintptr_t>(stack_ptr) & ~(kMinimalAlignment - 1);
812     uintptr_t end =
813         (reinterpret_cast<uintptr_t>(stack_top) + kMinimalAlignment - 1) &
814         ~(kMinimalAlignment - 1);
815     PA_CHECK(begin < end);
816     PCScanScanLoop loop(task_);
817     loop.Run(begin, end);
818     quarantine_size_ += loop.quarantine_size();
819   }
820 
821   // Returns size of quarantined slots that are reachable from the current
822   // stack.
823   size_t quarantine_size() const { return quarantine_size_; }
824 
825  private:
826   const PCScanTask& task_;
827   size_t quarantine_size_ = 0;
828 };
829 
830 PCScanTask::PCScanTask(PCScan& pcscan, size_t quarantine_last_size)
831     : pcscan_epoch_(pcscan.epoch() - 1),
832       snapshot_(StarScanSnapshot::Create(PCScanInternal::Instance())),
833       stats_(PCScanInternal::Instance().process_name(), quarantine_last_size),
834       immediatelly_free_slots_(
835           PCScanInternal::Instance().IsImmediateFreeingEnabled()),
836       pcscan_(pcscan) {}
837 
838 void PCScanTask::ScanStack() {
839   const auto& pcscan = PCScanInternal::Instance();
840   if (!pcscan.IsStackScanningEnabled()) {
841     return;
842   }
843   // Check if the stack top was registered. It may not be, if the current
844   // allocation happens from pthread trampolines.
845   void* stack_top = pcscan.GetCurrentThreadStackTop();
846   if (PA_UNLIKELY(!stack_top)) {
847     return;
848   }
849 
850   Stack stack_scanner(stack_top);
851   StackVisitor visitor(*this);
852   stack_scanner.IteratePointers(&visitor);
853   stats_.IncreaseSurvivedQuarantineSize(visitor.quarantine_size());
854 }
855 
856 void PCScanTask::ScanNormalArea(PCScanInternal& pcscan,
857                                 PCScanScanLoop& scan_loop,
858                                 uintptr_t begin,
859                                 uintptr_t end) {
860   // Protect slot span before scanning it.
861   pcscan.ProtectPages(begin, end - begin);
862   scan_loop.Run(begin, end);
863 }
864 
865 void PCScanTask::ScanLargeArea(PCScanInternal& pcscan,
866                                PCScanScanLoop& scan_loop,
867                                uintptr_t begin,
868                                uintptr_t end,
869                                size_t slot_size) {
870   // For scanning large areas, it's worthwhile checking whether the range that
871   // is scanned contains allocated slots. It also helps to skip discarded
872   // freed slots.
873   // Protect slot span before scanning it.
874   pcscan.ProtectPages(begin, end - begin);
875 
876   auto* bitmap = StateBitmapFromAddr(begin);
877 
878   for (uintptr_t current_slot = begin; current_slot < end;
879        current_slot += slot_size) {
880     // It is okay to skip slots, as the objects they hold have been zapped at
881     // this point, which means that their pointers no longer retain other slots.
882     if (!bitmap->IsAllocated(current_slot)) {
883       continue;
884     }
885     uintptr_t current_slot_end = current_slot + slot_size;
886     // |slot_size| may be larger than |raw_size| for single-slot slot spans.
887     scan_loop.Run(current_slot, std::min(current_slot_end, end));
888   }
889 }
890 
891 void PCScanTask::ScanPartitions() {
892   // Threshold on the bucket size above which it is worthwhile to check whether
893   // a slot is allocated and needs to be scanned. PartitionPurgeSlotSpan()
894   // purges only slots >= page-size, so this check helps us avoid faulting in
895   // discarded pages. We actually lower the threshold to 1024 to take advantage
896   // of skipping unallocated slots, but don't want to go any lower, as this
897   // comes at the cost of expensive bitmap checking.
898   static constexpr size_t kLargeScanAreaThresholdInWords =
899       1024 / sizeof(uintptr_t);
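  // On 64-bit platforms this is 1024 / 8 == 128 words: slot spans whose slots
  // are 1024 bytes or larger take the bitmap-checking ScanLargeArea() path
  // below, while smaller buckets are scanned unconditionally.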
900 
901   PCScanScanLoop scan_loop(*this);
902   auto& pcscan = PCScanInternal::Instance();
903 
904   StarScanSnapshot::ScanningView snapshot_view(*snapshot_);
905   snapshot_view.VisitConcurrently([this, &pcscan,
906                                    &scan_loop](uintptr_t super_page) {
907     SuperPageSnapshot super_page_snapshot(super_page);
908 
909     for (const auto& scan_area : super_page_snapshot.scan_areas()) {
910       const uintptr_t begin =
911           super_page |
912           (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
913       PA_SCAN_DCHECK(begin ==
914                      super_page + (scan_area.offset_within_page_in_words *
915                                    sizeof(uintptr_t)));
916       const uintptr_t end = begin + scan_area.size_in_words * sizeof(uintptr_t);
917 
918       if (PA_UNLIKELY(scan_area.slot_size_in_words >=
919                       kLargeScanAreaThresholdInWords)) {
920         ScanLargeArea(pcscan, scan_loop, begin, end,
921                       scan_area.slot_size_in_words * sizeof(uintptr_t));
922       } else {
923         ScanNormalArea(pcscan, scan_loop, begin, end);
924       }
925     }
926   });
927 
928   stats_.IncreaseSurvivedQuarantineSize(scan_loop.quarantine_size());
929 }
930 
931 namespace {
932 
933 struct SweepStat {
934   // Bytes that were really swept (by calling free()).
935   size_t swept_bytes = 0;
936   // Bytes of marked quarantine memory that were discarded (by calling
937   // madvise(MADV_DONTNEED)).
938   size_t discarded_bytes = 0;
939 };
940 
941 void UnmarkInCardTable(uintptr_t slot_start,
942                        SlotSpanMetadata<ThreadSafe>* slot_span) {
943 #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
944   // Reset card(s) for this quarantined slot. Please note that the cards may
945   // still contain quarantined slots (which were promoted in this scan cycle),
946   // but ClearQuarantinedSlotsAndPrepareCardTable() will set them again in the
947   // next PCScan cycle.
948   QuarantineCardTable::GetFrom(slot_start)
949       .Unquarantine(slot_start, slot_span->GetUtilizedSlotSize());
950 #endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
951 }
952 
953 [[maybe_unused]] size_t FreeAndUnmarkInCardTable(
954     PartitionRoot<ThreadSafe>* root,
955     SlotSpanMetadata<ThreadSafe>* slot_span,
956     uintptr_t slot_start) {
957   void* object = root->SlotStartToObject(slot_start);
958   root->FreeNoHooksImmediate(object, slot_span, slot_start);
959   UnmarkInCardTable(slot_start, slot_span);
960   return slot_span->bucket->slot_size;
961 }
962 
963 [[maybe_unused]] void SweepSuperPage(ThreadSafePartitionRoot* root,
964                                      uintptr_t super_page,
965                                      size_t epoch,
966                                      SweepStat& stat) {
967   auto* bitmap = StateBitmapFromAddr(super_page);
968   ThreadSafePartitionRoot::FromFirstSuperPage(super_page);
969   bitmap->IterateUnmarkedQuarantined(epoch, [root,
970                                              &stat](uintptr_t slot_start) {
971     auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
972     stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
973   });
974 }
975 
976 [[maybe_unused]] void SweepSuperPageAndDiscardMarkedQuarantine(
977     ThreadSafePartitionRoot* root,
978     uintptr_t super_page,
979     size_t epoch,
980     SweepStat& stat) {
981   auto* bitmap = StateBitmapFromAddr(super_page);
982   bitmap->IterateQuarantined(epoch, [root, &stat](uintptr_t slot_start,
983                                                   bool is_marked) {
984     auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
985     if (PA_LIKELY(!is_marked)) {
986       stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
987       return;
988     }
989     // Otherwise, try to discard pages for the marked quarantine. Since no data
990     // (e.g. the freelist |next| pointer) is stored in quarantined slots, this
991     // can be done freely.
992     const size_t slot_size = slot_span->bucket->slot_size;
993     if (slot_size >= SystemPageSize()) {
994       const uintptr_t discard_end =
995           base::bits::AlignDown(slot_start + slot_size, SystemPageSize());
996       const uintptr_t discard_begin =
997           base::bits::AlignUp(slot_start, SystemPageSize());
998       const intptr_t discard_size = discard_end - discard_begin;
999       if (discard_size > 0) {
1000         DiscardSystemPages(discard_begin, discard_size);
1001         stat.discarded_bytes += discard_size;
1002       }
1003     }
1004   });
1005 }
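// Worked example of the discard math above (hypothetical addresses, assuming
// a 4 KiB system page size): for slot_start == 0x5100 and slot_size == 20 KiB,
// discard_begin == AlignUp(0x5100) == 0x6000 and
// discard_end == AlignDown(0xA100) == 0xA000, so the four system pages fully
// contained in the slot (16 KiB) are discarded, while the partially covered
// head and tail pages are kept.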
1006 
1007 [[maybe_unused]] void SweepSuperPageWithBatchedFree(
1008     ThreadSafePartitionRoot* root,
1009     uintptr_t super_page,
1010     size_t epoch,
1011     SweepStat& stat) {
1012   using SlotSpan = SlotSpanMetadata<ThreadSafe>;
1013 
1014   auto* bitmap = StateBitmapFromAddr(super_page);
1015   SlotSpan* previous_slot_span = nullptr;
1016   internal::PartitionFreelistEntry* freelist_tail = nullptr;
1017   internal::PartitionFreelistEntry* freelist_head = nullptr;
1018   size_t freelist_entries = 0;
1019 
1020   const auto bitmap_iterator = [&](uintptr_t slot_start) {
1021     SlotSpan* current_slot_span = SlotSpan::FromSlotStart(slot_start);
1022     auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
1023 
1024     if (current_slot_span != previous_slot_span) {
1025       // We started scanning a new slot span. Flush the accumulated freelist to
1026       // the slot-span's freelist. This is a single lock acquired per slot span.
1027       if (previous_slot_span && freelist_entries) {
1028         root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
1029                            previous_slot_span);
1030       }
1031       freelist_head = entry;
1032       freelist_tail = nullptr;
1033       freelist_entries = 0;
1034       previous_slot_span = current_slot_span;
1035     }
1036 
1037     if (freelist_tail) {
1038       freelist_tail->SetNext(entry);
1039     }
1040     freelist_tail = entry;
1041     ++freelist_entries;
1042 
1043     UnmarkInCardTable(slot_start, current_slot_span);
1044 
1045     stat.swept_bytes += current_slot_span->bucket->slot_size;
1046   };
1047 
1048   bitmap->IterateUnmarkedQuarantinedAndFree(epoch, bitmap_iterator);
1049 
1050   if (previous_slot_span && freelist_entries) {
1051     root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
1052                        previous_slot_span);
1053   }
1054 }
1055 
1056 }  // namespace
1057 
1058 void PCScanTask::SweepQuarantine() {
1059   // Check that scan is unjoinable by this time.
1060   PA_DCHECK(!pcscan_.IsJoinable());
1061   // Discard marked quarantine memory on every Nth scan.
1062   // TODO(bikineev): Find a better signal (e.g. memory pressure, high
1063   // survival rate, etc).
1064   static constexpr size_t kDiscardMarkedQuarantineFrequency = 16;
1065   const bool should_discard =
1066       (pcscan_epoch_ % kDiscardMarkedQuarantineFrequency == 0) &&
1067       (pcscan_.clear_type_ == PCScan::ClearType::kEager);
1068 
1069   SweepStat stat;
1070   StarScanSnapshot::SweepingView sweeping_view(*snapshot_);
1071   sweeping_view.VisitNonConcurrently(
1072       [this, &stat, should_discard](uintptr_t super_page) {
1073         auto* root = ThreadSafePartitionRoot::FromFirstSuperPage(super_page);
1074 
1075 #if PA_CONFIG(STARSCAN_BATCHED_FREE)
1076         SweepSuperPageWithBatchedFree(root, super_page, pcscan_epoch_, stat);
1077         (void)should_discard;
1078 #else
1079         if (PA_UNLIKELY(should_discard && !root->flags.allow_cookie))
1080           SweepSuperPageAndDiscardMarkedQuarantine(root, super_page,
1081                                                    pcscan_epoch_, stat);
1082         else
1083           SweepSuperPage(root, super_page, pcscan_epoch_, stat);
1084 #endif  // PA_CONFIG(STARSCAN_BATCHED_FREE)
1085       });
1086 
1087   stats_.IncreaseSweptSize(stat.swept_bytes);
1088   stats_.IncreaseDiscardedQuarantineSize(stat.discarded_bytes);
1089 
1090 #if PA_CONFIG(THREAD_CACHE_SUPPORTED)
1091   // Sweeping potentially frees into the current thread's thread cache. Purge
1092   // releases the cache back to the global allocator.
1093   auto* current_thread_tcache = ThreadCache::Get();
1094   if (ThreadCache::IsValid(current_thread_tcache)) {
1095     current_thread_tcache->Purge();
1096   }
1097 #endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
1098 }
1099 
1100 void PCScanTask::FinishScanner() {
1101   stats_.ReportTracesAndHists(PCScanInternal::Instance().GetReporter());
1102 
1103   pcscan_.scheduler_.scheduling_backend().UpdateScheduleAfterScan(
1104       stats_.survived_quarantine_size(), stats_.GetOverallTime(),
1105       PCScanInternal::Instance().CalculateTotalHeapSize());
1106 
1107   PCScanInternal::Instance().ResetCurrentPCScanTask();
1108   // Change the state and check that a concurrent task can't be scheduled twice.
1109   PA_CHECK(pcscan_.state_.exchange(PCScan::State::kNotRunning,
1110                                    std::memory_order_acq_rel) ==
1111            PCScan::State::kSweepingAndFinishing);
1112 }
1113 
1114 void PCScanTask::RunFromMutator() {
1115   ReentrantScannerGuard reentrancy_guard;
1116   StatsCollector::MutatorScope overall_scope(
1117       stats_, StatsCollector::MutatorId::kOverall);
1118   {
1119     SyncScope<Context::kMutator> sync_scope(*this);
1120     // The mutator might start entering the safepoint after scanning has
1121     // already finished.
1122     if (!pcscan_.IsJoinable()) {
1123       return;
1124     }
1125     {
1126       // Clear all quarantined slots and prepare card table.
1127       StatsCollector::MutatorScope clear_scope(
1128           stats_, StatsCollector::MutatorId::kClear);
1129       ClearQuarantinedSlotsAndPrepareCardTable();
1130     }
1131     {
1132       // Scan the thread's stack to find dangling references.
1133       StatsCollector::MutatorScope scan_scope(
1134           stats_, StatsCollector::MutatorId::kScanStack);
1135       ScanStack();
1136     }
1137     {
1138       // Unprotect all scanned pages, if needed.
1139       UnprotectPartitions();
1140     }
1141     {
1142       // Scan heap for dangling references.
1143       StatsCollector::MutatorScope scan_scope(stats_,
1144                                               StatsCollector::MutatorId::kScan);
1145       ScanPartitions();
1146     }
1147   }
1148 }
1149 
1150 void PCScanTask::RunFromScanner() {
1151   ReentrantScannerGuard reentrancy_guard;
1152   {
1153     StatsCollector::ScannerScope overall_scope(
1154         stats_, StatsCollector::ScannerId::kOverall);
1155     {
1156       SyncScope<Context::kScanner> sync_scope(*this);
1157       {
1158         // Clear all quarantined slots and prepare the card table.
1159         StatsCollector::ScannerScope clear_scope(
1160             stats_, StatsCollector::ScannerId::kClear);
1161         ClearQuarantinedSlotsAndPrepareCardTable();
1162       }
1163       {
1164         // Scan heap for dangling references.
1165         StatsCollector::ScannerScope scan_scope(
1166             stats_, StatsCollector::ScannerId::kScan);
1167         ScanPartitions();
1168       }
1169       {
1170         // Unprotect all scanned pages, if needed.
1171         UnprotectPartitions();
1172       }
1173     }
1174     {
1175       // Sweep unreachable quarantined slots.
1176       StatsCollector::ScannerScope sweep_scope(
1177           stats_, StatsCollector::ScannerId::kSweep);
1178       SweepQuarantine();
1179     }
1180   }
1181   FinishScanner();
1182 }
1183 
1184 class PCScan::PCScanThread final {
1185  public:
1186   using TaskHandle = PCScanInternal::TaskHandle;
1187 
1188   static PCScanThread& Instance() {
1189     // Lazily instantiate the scanning thread.
1190     static internal::base::NoDestructor<PCScanThread> instance;
1191     return *instance;
1192   }
1193 
1194   void PostTask(TaskHandle task) {
1195     {
1196       std::lock_guard<std::mutex> lock(mutex_);
1197       PA_DCHECK(!posted_task_.get());
1198       posted_task_ = std::move(task);
1199       wanted_delay_ = base::TimeDelta();
1200     }
1201     condvar_.notify_one();
1202   }
1203 
1204   void PostDelayedTask(base::TimeDelta delay) {
1205     {
1206       std::lock_guard<std::mutex> lock(mutex_);
1207       if (posted_task_.get()) {
1208         return;
1209       }
1210       wanted_delay_ = delay;
1211     }
1212     condvar_.notify_one();
1213   }
1214 
1215  private:
1216   friend class internal::base::NoDestructor<PCScanThread>;
1217 
1218   PCScanThread() {
1219     ScopedAllowAllocations allow_allocations_within_std_thread;
1220     std::thread{[](PCScanThread* instance) {
1221                   static constexpr const char* kThreadName = "PCScan";
1222                   // Ideally we should avoid mixing base:: and std:: API for
1223                   // threading, but this is useful for visualizing the pcscan
1224                   // thread in chrome://tracing.
1225                   internal::base::PlatformThread::SetName(kThreadName);
1226                   instance->TaskLoop();
1227                 },
1228                 this}
1229         .detach();
1230   }
1231 
1232   // Waits and returns whether the delay should be recomputed.
1233   bool Wait(std::unique_lock<std::mutex>& lock) {
1234     PA_DCHECK(lock.owns_lock());
1235     if (wanted_delay_.is_zero()) {
1236       condvar_.wait(lock, [this] {
1237         // Re-evaluate if either delay changed, or a task was
1238         // enqueued.
1239         return !wanted_delay_.is_zero() || posted_task_.get();
1240       });
1241       // The delay has already been set up and should not be queried again.
1242       return false;
1243     }
1244     condvar_.wait_for(
1245         lock, std::chrono::microseconds(wanted_delay_.InMicroseconds()));
1246     // If no task has been posted, the delay should be recomputed at this point.
1247     return !posted_task_.get();
1248   }
1249 
1250   void TaskLoop() {
1251     while (true) {
1252       TaskHandle current_task;
1253       {
1254         std::unique_lock<std::mutex> lock(mutex_);
1255         // Scheduling.
1256         while (!posted_task_.get()) {
1257           if (Wait(lock)) {
1258             wanted_delay_ =
1259                 scheduler().scheduling_backend().UpdateDelayedSchedule();
1260             if (wanted_delay_.is_zero()) {
1261               break;
1262             }
1263           }
1264         }
1265         // Differentiate between a posted task and a delayed task schedule.
1266         if (posted_task_.get()) {
1267           std::swap(current_task, posted_task_);
1268           wanted_delay_ = base::TimeDelta();
1269         } else {
1270           PA_DCHECK(wanted_delay_.is_zero());
1271         }
1272       }
1273       // Differentiate between a posted task and a delayed task schedule.
1274       if (current_task.get()) {
1275         current_task->RunFromScanner();
1276       } else {
1277         PCScan::Instance().PerformScan(PCScan::InvocationMode::kNonBlocking);
1278       }
1279     }
1280   }
1281 
1282   PCScanScheduler& scheduler() const { return PCScan::Instance().scheduler(); }
1283 
1284   std::mutex mutex_;
1285   std::condition_variable condvar_;
1286   TaskHandle posted_task_;
1287   base::TimeDelta wanted_delay_;
1288 };
1289 
1290 PCScanInternal::PCScanInternal() : simd_support_(DetectSimdSupport()) {}
1291 
1292 PCScanInternal::~PCScanInternal() = default;
1293 
1294 void PCScanInternal::Initialize(PCScan::InitConfig config) {
1295   PA_DCHECK(!is_initialized_);
1296 #if BUILDFLAG(HAS_64_BIT_POINTERS)
1297   // Make sure that pools are initialized.
1298   PartitionAddressSpace::Init();
1299 #endif
1300   CommitCardTable();
1301 #if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
1302   if (config.write_protection ==
1303       PCScan::InitConfig::WantedWriteProtectionMode::kEnabled) {
1304     write_protector_ = std::make_unique<UserFaultFDWriteProtector>();
1305   } else {
1306     write_protector_ = std::make_unique<NoWriteProtector>();
1307   }
1308 #else
1309   write_protector_ = std::make_unique<NoWriteProtector>();
1310 #endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
1311   PCScan::SetClearType(write_protector_->SupportedClearType());
1312 
1313   if (config.safepoint == PCScan::InitConfig::SafepointMode::kEnabled) {
1314     PCScan::Instance().EnableSafepoints();
1315   }
1316   scannable_roots_ = RootsMap();
1317   nonscannable_roots_ = RootsMap();
1318 
1319   static partition_alloc::StatsReporter s_no_op_reporter;
1320   PCScan::Instance().RegisterStatsReporter(&s_no_op_reporter);
1321 
1322   // Don't initialize PCScanThread::Instance() as otherwise sandbox complains
1323   // about multiple threads running on sandbox initialization.
1324   is_initialized_ = true;
1325 }
1326 
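// Kicks off a scan: atomically moves the frontend state from kNotRunning to
// kScheduled, notifies the scheduling backend that a scan has started, and
// either posts the resulting task to the scanning thread (non-blocking mode)
// or runs it synchronously on the calling thread (blocking modes).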
void PCScanInternal::PerformScan(PCScan::InvocationMode invocation_mode) {
#if PA_SCAN_DCHECK_IS_ON()
  PA_DCHECK(is_initialized());
  PA_DCHECK(scannable_roots().size() > 0);
  PA_DCHECK(std::all_of(
      scannable_roots().begin(), scannable_roots().end(),
      [](const auto& pair) { return pair.first->IsScanEnabled(); }));
  PA_DCHECK(std::all_of(
      nonscannable_roots().begin(), nonscannable_roots().end(),
      [](const auto& pair) { return pair.first->IsQuarantineEnabled(); }));
#endif

  PCScan& frontend = PCScan::Instance();
  {
    // If scanning is already in progress, bail out.
    PCScan::State expected = PCScan::State::kNotRunning;
    if (!frontend.state_.compare_exchange_strong(
            expected, PCScan::State::kScheduled, std::memory_order_acq_rel,
            std::memory_order_relaxed)) {
      return;
    }
  }

  const size_t last_quarantine_size =
      frontend.scheduler_.scheduling_backend().ScanStarted();

  // Create a PCScan task and set it as the current one.
  auto task = base::MakeRefCounted<PCScanTask>(frontend, last_quarantine_size);
  PCScanInternal::Instance().SetCurrentPCScanTask(task);

  if (PA_UNLIKELY(invocation_mode ==
                  PCScan::InvocationMode::kScheduleOnlyForTesting)) {
    // Immediately change the state to enable safepoint testing.
    frontend.state_.store(PCScan::State::kScanning, std::memory_order_release);
    frontend.SetJoinableIfSafepointEnabled(true);
    return;
  }

  // Post the PCScan task.
  if (PA_LIKELY(invocation_mode == PCScan::InvocationMode::kNonBlocking)) {
    PCScan::PCScanThread::Instance().PostTask(std::move(task));
  } else {
    PA_SCAN_DCHECK(PCScan::InvocationMode::kBlocking == invocation_mode ||
                   PCScan::InvocationMode::kForcedBlocking == invocation_mode);
    std::move(*task).RunFromScanner();
  }
}

void PCScanInternal::PerformScanIfNeeded(
    PCScan::InvocationMode invocation_mode) {
  if (!scannable_roots().size()) {
    return;
  }
  PCScan& frontend = PCScan::Instance();
  if (invocation_mode == PCScan::InvocationMode::kForcedBlocking ||
      frontend.scheduler_.scheduling_backend()
          .GetQuarantineData()
          .MinimumScanningThresholdReached()) {
    PerformScan(invocation_mode);
  }
}

void PCScanInternal::PerformDelayedScan(base::TimeDelta delay) {
  PCScan::PCScanThread::Instance().PostDelayedTask(delay);
}

void PCScanInternal::JoinScan() {
  // Current task can be destroyed by the scanner. Check that it's valid.
  if (auto current_task = CurrentPCScanTask()) {
    current_task->RunFromMutator();
  }
}

PCScanInternal::TaskHandle PCScanInternal::CurrentPCScanTask() const {
  std::lock_guard<std::mutex> lock(current_task_mutex_);
  return current_task_;
}

void PCScanInternal::SetCurrentPCScanTask(TaskHandle task) {
  std::lock_guard<std::mutex> lock(current_task_mutex_);
  current_task_ = std::move(task);
}

void PCScanInternal::ResetCurrentPCScanTask() {
  std::lock_guard<std::mutex> lock(current_task_mutex_);
  current_task_.reset();
}

namespace {
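// Walks all super-page extents of |root|, recommits the quarantine state
// bitmap of every super page, and returns the collected super-page addresses.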
PCScanInternal::SuperPages GetSuperPagesAndCommitStateBitmaps(
    PCScan::Root& root) {
  const size_t state_bitmap_size_to_commit = CommittedStateBitmapSize();
  PCScanInternal::SuperPages super_pages;
  for (auto* super_page_extent = root.first_extent; super_page_extent;
       super_page_extent = super_page_extent->next) {
    for (uintptr_t super_page = SuperPagesBeginFromExtent(super_page_extent),
                   super_page_end = SuperPagesEndFromExtent(super_page_extent);
         super_page != super_page_end; super_page += kSuperPageSize) {
      // Make sure the metadata is committed.
      // TODO(bikineev): Remove once this is known to work.
      const volatile char* metadata = reinterpret_cast<char*>(
          PartitionSuperPageToMetadataArea<ThreadSafe>(super_page));
      *metadata;
      RecommitSystemPages(SuperPageStateBitmapAddr(super_page),
                          state_bitmap_size_to_commit,
                          PageAccessibilityConfiguration(
                              PageAccessibilityConfiguration::kReadWrite),
                          PageAccessibilityDisposition::kRequireUpdate);
      super_pages.push_back(super_page);
    }
  }
  return super_pages;
}
}  // namespace

void PCScanInternal::RegisterScannableRoot(Root* root) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(root);
  // Avoid nesting locks and store super_pages in a temporary vector.
  SuperPages super_pages;
  {
    ::partition_alloc::internal::ScopedGuard guard(root->lock_);
    PA_CHECK(root->IsQuarantineAllowed());
    if (root->IsScanEnabled()) {
      return;
    }
    PA_CHECK(!root->IsQuarantineEnabled());
    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
    root->flags.scan_mode = Root::ScanMode::kEnabled;
    root->flags.quarantine_mode = Root::QuarantineMode::kEnabled;
  }
  std::lock_guard<std::mutex> lock(roots_mutex_);
  PA_DCHECK(!scannable_roots_.count(root));
  auto& root_super_pages = scannable_roots_[root];
  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
                          super_pages.end());
}

void PCScanInternal::RegisterNonScannableRoot(Root* root) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(root);
  // Avoid nesting locks and store super_pages in a temporary vector.
  SuperPages super_pages;
  {
    ::partition_alloc::internal::ScopedGuard guard(root->lock_);
    PA_CHECK(root->IsQuarantineAllowed());
    PA_CHECK(!root->IsScanEnabled());
    if (root->IsQuarantineEnabled()) {
      return;
    }
    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
    root->flags.quarantine_mode = Root::QuarantineMode::kEnabled;
  }
  std::lock_guard<std::mutex> lock(roots_mutex_);
  PA_DCHECK(!nonscannable_roots_.count(root));
  auto& root_super_pages = nonscannable_roots_[root];
  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
                          super_pages.end());
}

void PCScanInternal::RegisterNewSuperPage(Root* root,
                                          uintptr_t super_page_base) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(root);
  PA_CHECK(root->IsQuarantineAllowed());
  PA_DCHECK(!(super_page_base % kSuperPageAlignment));
  // Make sure the metadata is committed.
  // TODO(bikineev): Remove once this is known to work.
  const volatile char* metadata = reinterpret_cast<char*>(
      PartitionSuperPageToMetadataArea<ThreadSafe>(super_page_base));
  *metadata;

  std::lock_guard<std::mutex> lock(roots_mutex_);

  // Dispatch based on whether the root is scannable or not.
  if (root->IsScanEnabled()) {
    PA_DCHECK(scannable_roots_.count(root));
    auto& super_pages = scannable_roots_[root];
    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
                        super_page_base) == super_pages.end());
    super_pages.push_back(super_page_base);
  } else {
    PA_DCHECK(root->IsQuarantineEnabled());
    PA_DCHECK(nonscannable_roots_.count(root));
    auto& super_pages = nonscannable_roots_[root];
    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
                        super_page_base) == super_pages.end());
    super_pages.push_back(super_page_base);
  }
}

void PCScanInternal::SetProcessName(const char* process_name) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(process_name);
  PA_DCHECK(!process_name_);
  process_name_ = process_name;
}

size_t PCScanInternal::CalculateTotalHeapSize() const {
  PA_DCHECK(is_initialized());
  std::lock_guard<std::mutex> lock(roots_mutex_);
  const auto acc = [](size_t size, const auto& pair) {
    return size + pair.first->get_total_size_of_committed_pages();
  };
  return std::accumulate(scannable_roots_.begin(), scannable_roots_.end(), 0u,
                         acc) +
         std::accumulate(nonscannable_roots_.begin(), nonscannable_roots_.end(),
                         0u, acc);
}

void PCScanInternal::EnableStackScanning() {
  PA_DCHECK(!stack_scanning_enabled_);
  stack_scanning_enabled_ = true;
}
void PCScanInternal::DisableStackScanning() {
  PA_DCHECK(stack_scanning_enabled_);
  stack_scanning_enabled_ = false;
}
bool PCScanInternal::IsStackScanningEnabled() const {
  return stack_scanning_enabled_;
}

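// Stack tops are tracked per thread so that, when stack scanning is enabled,
// the scanner can treat each live thread's stack as a source of pointers into
// the heap.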
void PCScanInternal::NotifyThreadCreated(void* stack_top) {
  const auto tid = base::PlatformThread::CurrentId();
  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
  const auto res = stack_tops_.insert({tid, stack_top});
  PA_DCHECK(res.second);
}

void PCScanInternal::NotifyThreadDestroyed() {
  const auto tid = base::PlatformThread::CurrentId();
  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
  PA_DCHECK(1 == stack_tops_.count(tid));
  stack_tops_.erase(tid);
}

void* PCScanInternal::GetCurrentThreadStackTop() const {
  const auto tid = base::PlatformThread::CurrentId();
  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
  auto it = stack_tops_.find(tid);
  return it != stack_tops_.end() ? it->second : nullptr;
}

bool PCScanInternal::WriteProtectionEnabled() const {
  return write_protector_->IsEnabled();
}

void PCScanInternal::ProtectPages(uintptr_t begin, size_t size) {
  // Slot-span sizes are a multiple of the system page size. However, the
  // recorded ranges are not, since the snapshot only records the used payload.
  // Therefore, align the incoming range up to the system page size. The unused
  // part of slot-spans doesn't need to be protected (the allocator will enter
  // the safepoint before trying to allocate from it).
  PA_SCAN_DCHECK(write_protector_.get());
  write_protector_->ProtectPages(
      begin,
      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
}

void PCScanInternal::UnprotectPages(uintptr_t begin, size_t size) {
  PA_SCAN_DCHECK(write_protector_.get());
  write_protector_->UnprotectPages(
      begin,
      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
}

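// Test-only teardown: marks all registered roots as non-scannable and
// non-quarantinable again and releases PCScan-owned state so that
// ReinitForTesting() can rebuild it from scratch.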
void PCScanInternal::ClearRootsForTesting() {
  std::lock_guard<std::mutex> lock(roots_mutex_);
  // Set all roots as non-scannable and non-quarantinable.
  for (auto& pair : scannable_roots_) {
    Root* root = pair.first;
    root->flags.scan_mode = Root::ScanMode::kDisabled;
    root->flags.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
  }
  for (auto& pair : nonscannable_roots_) {
    Root* root = pair.first;
    root->flags.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
  }
  // Make sure to destroy the maps so that on the following ReinitForTesting()
  // call the maps don't attempt to destroy the backing.
  scannable_roots_.clear();
  scannable_roots_.~RootsMap();
  nonscannable_roots_.clear();
  nonscannable_roots_.~RootsMap();
  // Destroy the write protector object, so that there is no double free on
  // the next call to ReinitForTesting().
  write_protector_.reset();
}

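// Reconstructs the object in place (placement new) and re-runs Initialize()
// so that tests start from a freshly initialized PCScanInternal.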
void PCScanInternal::ReinitForTesting(PCScan::InitConfig config) {
  is_initialized_ = false;
  auto* new_this = new (this) PCScanInternal;
  new_this->Initialize(config);
}

void PCScanInternal::FinishScanForTesting() {
  auto current_task = CurrentPCScanTask();
  PA_CHECK(current_task.get());
  current_task->RunFromScanner();
}

void PCScanInternal::RegisterStatsReporter(
    partition_alloc::StatsReporter* reporter) {
  PA_DCHECK(reporter);
  stats_reporter_ = reporter;
}

partition_alloc::StatsReporter& PCScanInternal::GetReporter() {
  PA_DCHECK(stats_reporter_);
  return *stats_reporter_;
}

}  // namespace partition_alloc::internal