1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/allocator/partition_alloc_support.h"
6 
7 #include <array>
8 #include <cinttypes>
9 #include <cstdint>
10 #include <map>
11 #include <string>
12 
13 #include "base/allocator/partition_alloc_features.h"
14 #include "base/allocator/partition_allocator/allocation_guard.h"
15 #include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
16 #include "base/allocator/partition_allocator/memory_reclaimer.h"
17 #include "base/allocator/partition_allocator/page_allocator.h"
18 #include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
19 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
20 #include "base/allocator/partition_allocator/partition_alloc_check.h"
21 #include "base/allocator/partition_allocator/partition_alloc_config.h"
22 #include "base/allocator/partition_allocator/partition_lock.h"
23 #include "base/allocator/partition_allocator/shim/allocator_shim.h"
24 #include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
25 #include "base/allocator/partition_allocator/thread_cache.h"
26 #include "base/check.h"
27 #include "base/debug/dump_without_crashing.h"
28 #include "base/debug/stack_trace.h"
29 #include "base/debug/task_trace.h"
30 #include "base/feature_list.h"
31 #include "base/functional/bind.h"
32 #include "base/functional/callback.h"
33 #include "base/immediate_crash.h"
34 #include "base/location.h"
35 #include "base/memory/raw_ptr_asan_service.h"
36 #include "base/metrics/histogram_functions.h"
37 #include "base/metrics/histogram_macros.h"
38 #include "base/no_destructor.h"
39 #include "base/strings/string_piece.h"
40 #include "base/strings/string_split.h"
41 #include "base/strings/stringprintf.h"
42 #include "base/system/sys_info.h"
43 #include "base/task/single_thread_task_runner.h"
44 #include "base/thread_annotations.h"
45 #include "base/threading/platform_thread.h"
46 #include "base/time/time.h"
47 #include "base/timer/timer.h"
48 #include "base/trace_event/base_tracing.h"
49 #include "build/build_config.h"
50 #include "third_party/abseil-cpp/absl/types/optional.h"
51 
52 #if BUILDFLAG(USE_STARSCAN)
53 #include "base/allocator/partition_allocator/starscan/pcscan.h"
54 #include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
55 #include "base/allocator/partition_allocator/starscan/stack/stack.h"
56 #include "base/allocator/partition_allocator/starscan/stats_collector.h"
57 #include "base/allocator/partition_allocator/starscan/stats_reporter.h"
58 #include "base/memory/nonscannable_memory.h"
59 #endif  // BUILDFLAG(USE_STARSCAN)
60 
61 #if BUILDFLAG(IS_ANDROID)
62 #include "base/system/sys_info.h"
63 #endif
64 
65 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
66 #include "base/allocator/partition_allocator/memory_reclaimer.h"
67 #endif
68 
69 namespace base::allocator {
70 
71 namespace {
72 
73 // These values are defined in content/public/common/content_switches.h, which
74 // is not accessible in ::base. They must be kept in sync.
75 namespace switches {
76 [[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
77 constexpr char kZygoteProcess[] = "zygote";
78 #if BUILDFLAG(USE_STARSCAN)
79 constexpr char kGpuProcess[] = "gpu-process";
80 constexpr char kUtilityProcess[] = "utility";
81 #endif
82 }  // namespace switches
83 
84 #if BUILDFLAG(USE_STARSCAN)
85 
86 #if BUILDFLAG(ENABLE_BASE_TRACING)
87 constexpr const char* ScannerIdToTracingString(
88     partition_alloc::internal::StatsCollector::ScannerId id) {
89   switch (id) {
90     case partition_alloc::internal::StatsCollector::ScannerId::kClear:
91       return "PCScan.Scanner.Clear";
92     case partition_alloc::internal::StatsCollector::ScannerId::kScan:
93       return "PCScan.Scanner.Scan";
94     case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
95       return "PCScan.Scanner.Sweep";
96     case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
97       return "PCScan.Scanner";
98     case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
99       __builtin_unreachable();
100   }
101 }
102 
103 constexpr const char* MutatorIdToTracingString(
104     partition_alloc::internal::StatsCollector::MutatorId id) {
105   switch (id) {
106     case partition_alloc::internal::StatsCollector::MutatorId::kClear:
107       return "PCScan.Mutator.Clear";
108     case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
109       return "PCScan.Mutator.ScanStack";
110     case partition_alloc::internal::StatsCollector::MutatorId::kScan:
111       return "PCScan.Mutator.Scan";
112     case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
113       return "PCScan.Mutator";
114     case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
115       __builtin_unreachable();
116   }
117 }
118 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
119 
120 // Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
121 class StatsReporterImpl final : public partition_alloc::StatsReporter {
122  public:
123   void ReportTraceEvent(
124       partition_alloc::internal::StatsCollector::ScannerId id,
125       [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
126       int64_t start_time_ticks_internal_value,
127       int64_t end_time_ticks_internal_value) override {
128 #if BUILDFLAG(ENABLE_BASE_TRACING)
129     // TRACE_EVENT_* macros below drop most parameters when tracing is
130     // disabled at compile time.
131     const char* tracing_id = ScannerIdToTracingString(id);
132     const TimeTicks start_time =
133         TimeTicks::FromInternalValue(start_time_ticks_internal_value);
134     const TimeTicks end_time =
135         TimeTicks::FromInternalValue(end_time_ticks_internal_value);
136     TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
137                       perfetto::ThreadTrack::ForThread(tid), start_time);
138     TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
139                     end_time);
140 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
141   }
142 
143   void ReportTraceEvent(
144       partition_alloc::internal::StatsCollector::MutatorId id,
145       [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
146       int64_t start_time_ticks_internal_value,
147       int64_t end_time_ticks_internal_value) override {
148 #if BUILDFLAG(ENABLE_BASE_TRACING)
149     // TRACE_EVENT_* macros below drop most parameters when tracing is
150     // disabled at compile time.
151     const char* tracing_id = MutatorIdToTracingString(id);
152     const TimeTicks start_time =
153         TimeTicks::FromInternalValue(start_time_ticks_internal_value);
154     const TimeTicks end_time =
155         TimeTicks::FromInternalValue(end_time_ticks_internal_value);
156     TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
157                       perfetto::ThreadTrack::ForThread(tid), start_time);
158     TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
159                     end_time);
160 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
161   }
162 
163   void ReportSurvivedQuarantineSize(size_t survived_size) override {
164     TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
165                    survived_size);
166   }
167 
168   void ReportSurvivedQuarantinePercent(double survived_rate) override {
169     // Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
170     // divide back.
171     // TODO(bikineev): Remove after switching to perfetto.
172     TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
173                    1000 * survived_rate);
174   }
175 
176   void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
177     TimeDelta sample = Microseconds(sample_in_usec);
178     UmaHistogramTimes(stats_name, sample);
179   }
180 
181  private:
182   static constexpr char kTraceCategory[] = "partition_alloc";
183 };
184 
185 #endif  // BUILDFLAG(USE_STARSCAN)
186 
187 }  // namespace
188 
189 #if BUILDFLAG(USE_STARSCAN)
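// Registers a process-lifetime StatsReporterImpl with PCScan so that scan
// phases are reported as trace events and UMA histograms. Intended to be
// called at most once per process (see the DCHECK below).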
190 void RegisterPCScanStatsReporter() {
191   static StatsReporterImpl s_reporter;
192   static bool registered = false;
193 
194   DCHECK(!registered);
195 
196   partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
197   registered = true;
198 }
199 #endif  // BUILDFLAG(USE_STARSCAN)
200 
201 namespace {
202 
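// Runs one thread cache purge pass and re-posts itself on the current
// thread's default task runner, using the next interval recommended by
// ThreadCacheRegistry.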
203 void RunThreadCachePeriodicPurge() {
204   // Micros, since periodic purge should typically take at most a few ms.
205   SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
206   TRACE_EVENT0("memory", "PeriodicPurge");
207   auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
208   instance.RunPeriodicPurge();
209   TimeDelta delay =
210       Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
211   SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
212       FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
213 }
214 
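// Performs one memory reclaim pass and re-posts itself on |task_runner| after
// the recommended reclaim interval.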
215 void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
216   TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
217   auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
218 
219   {
220     // Micros, since memory reclaiming should typically take at most a few ms.
221     SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
222     instance->ReclaimNormal();
223   }
224 
225   TimeDelta delay =
226       Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
227   task_runner->PostDelayedTask(
228       FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
229 }
230 
231 }  // namespace
232 
233 void StartThreadCachePeriodicPurge() {
234   auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
235   TimeDelta delay =
236       Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
237   SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
238       FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
239 }
240 
241 void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
242   // Can be called several times.
243   static bool is_memory_reclaimer_running = false;
244   if (is_memory_reclaimer_running) {
245     return;
246   }
247   is_memory_reclaimer_running = true;
248 
249 // The caller of the API fully controls where the reclaim runs.
250   // However there are a few reasons to recommend that the caller runs
251   // it on the main thread:
252   // - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
253   //   is more likely in cache when executing on the main thread.
254   // - Memory reclaim takes the partition lock for each partition. As a
255   //   consequence, while reclaim is running, the main thread is unlikely to be
256   //   able to make progress, as it would be waiting on the lock.
257   // - Finally, this runs in idle time only, so there should be no visible
258   //   impact.
259   //
260   // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
261   // seconds is useful. Since this is meant to run during idle time only, it is
262 // a reasonable starting point balancing effectiveness vs. cost. See
263   // crbug.com/942512 for details and experimental results.
264   auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
265   TimeDelta delay =
266       Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
267   task_runner->PostDelayedTask(
268       FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
269 }
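// Illustrative usage, mirroring the call in ReconfigureAfterTaskRunnerInit()
// further down in this file:
//   base::allocator::StartMemoryReclaimer(
//       base::SingleThreadTaskRunner::GetCurrentDefault());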
270 
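// Computes the synthetic Finch trial groups ("BackupRefPtr_Effective",
// "PCScan_Effective", "PCScan_Effective_Fallback", "DanglingPointerDetector")
// for this process, based on build flags and feature state.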
271 std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
272   std::map<std::string, std::string> trials;
273 
274 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
275   // BackupRefPtr_Effective and PCScan_Effective record whether or not
276   // BackupRefPtr and/or PCScan are enabled. The experiments aren't independent,
277   // so a synthetic Finch trial helps to look only at cases where one isn't
278   // affected by the other.
279 
280   // Whether PartitionAllocBackupRefPtr is enabled (as determined by
281   // FeatureList::IsEnabled).
282   [[maybe_unused]] bool brp_finch_enabled = false;
283   // Whether PartitionAllocBackupRefPtr is set up to deviate from the default
284   // behavior. The default behavior is when either the Finch flag is disabled,
285   // or it is enabled with brp-mode=disabled (these two options are equivalent).
286   [[maybe_unused]] bool brp_nondefault_behavior = false;
287   // Whether PartitionAllocBackupRefPtr is set up to enable BRP protection. It
288   // requires the Finch flag to be enabled and brp-mode!=disabled*. Some modes,
289   // e.g. disabled-but-3-way-split, do something (hence can't be considered the
290   // default behavior), but don't enable BRP protection.
291   [[maybe_unused]] bool brp_truly_enabled = false;
292 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
293   if (FeatureList::IsEnabled(features::kPartitionAllocBackupRefPtr)) {
294     brp_finch_enabled = true;
295   }
296   if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() !=
297                                features::BackupRefPtrMode::kDisabled) {
298     brp_nondefault_behavior = true;
299   }
300   if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() ==
301                                features::BackupRefPtrMode::kEnabled) {
302     brp_truly_enabled = true;
303   }
304 #endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
305   [[maybe_unused]] bool pcscan_enabled =
306 #if BUILDFLAG(USE_STARSCAN)
307       FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
308 #else
309       false;
310 #endif
311 
312   std::string brp_group_name = "Unavailable";
313 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
314   if (pcscan_enabled) {
315     // If PCScan is enabled, just ignore the population.
316     brp_group_name = "Ignore_PCScanIsOn";
317   } else if (!brp_finch_enabled) {
318     // The control group is actually disguised as "enabled", but in fact it's
319     // disabled using a param. This is to differentiate the population that
320     // participates in the control group, from the population that isn't in any
321     // group.
322     brp_group_name = "Ignore_NoGroup";
323   } else {
324     switch (features::kBackupRefPtrModeParam.Get()) {
325       case features::BackupRefPtrMode::kDisabled:
326         brp_group_name = "Disabled";
327         break;
328       case features::BackupRefPtrMode::kEnabled:
329 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
330         brp_group_name = "EnabledPrevSlot";
331 #else
332         brp_group_name = "EnabledBeforeAlloc";
333 #endif
334         break;
335       case features::BackupRefPtrMode::kEnabledWithoutZapping:
336 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
337         brp_group_name = "EnabledPrevSlotWithoutZapping";
338 #else
339         brp_group_name = "EnabledBeforeAllocWithoutZapping";
340 #endif
341         break;
342       case features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
343 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
344         brp_group_name = "EnabledPrevSlotWithMemoryReclaimer";
345 #else
346         brp_group_name = "EnabledBeforeAllocWithMemoryReclaimer";
347 #endif
348         break;
349       case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
350         brp_group_name = "DisabledBut2WaySplit";
351         break;
352       case features::BackupRefPtrMode::
353           kDisabledButSplitPartitions2WayWithMemoryReclaimer:
354         brp_group_name = "DisabledBut2WaySplitWithMemoryReclaimer";
355         break;
356       case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
357         brp_group_name = "DisabledBut3WaySplit";
358         break;
359       case features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
360         brp_group_name = "DisabledButAddDummyRefCount";
361         break;
362     }
363 
364     if (features::kBackupRefPtrModeParam.Get() !=
365         features::BackupRefPtrMode::kDisabled) {
366       std::string process_selector;
367       switch (features::kBackupRefPtrEnabledProcessesParam.Get()) {
368         case features::BackupRefPtrEnabledProcesses::kBrowserOnly:
369           process_selector = "BrowserOnly";
370           break;
371         case features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
372           process_selector = "BrowserAndRenderer";
373           break;
374         case features::BackupRefPtrEnabledProcesses::kNonRenderer:
375           process_selector = "NonRenderer";
376           break;
377         case features::BackupRefPtrEnabledProcesses::kAllProcesses:
378           process_selector = "AllProcesses";
379           break;
380       }
381 
382       brp_group_name += ("_" + process_selector);
383     }
384   }
385 #endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
386   trials.emplace("BackupRefPtr_Effective", brp_group_name);
387 
388   // On 32-bit architectures, PCScan is not supported and permanently disabled.
389   // Don't lump it into "Disabled", so that belonging to "Enabled"/"Disabled" is
390   // fully controlled by Finch and the two groups thus have identical population sizes.
391   std::string pcscan_group_name = "Unavailable";
392   std::string pcscan_group_name_fallback = "Unavailable";
393 #if BUILDFLAG(USE_STARSCAN)
394   if (brp_truly_enabled) {
395     // If BRP protection is enabled, just ignore the population. Check
396     // brp_truly_enabled, not brp_finch_enabled, because there are certain modes
397     // where BRP protection is actually disabled.
398     pcscan_group_name = "Ignore_BRPIsOn";
399   } else {
400     pcscan_group_name = (pcscan_enabled ? "Enabled" : "Disabled");
401   }
402   // In case we are incorrect that PCScan is independent of partition-split
403   // modes, create a fallback trial that only takes into account the BRP Finch
404   // settings that preserve the default behavior.
405   if (brp_nondefault_behavior) {
406     pcscan_group_name_fallback = "Ignore_BRPIsOn";
407   } else {
408     pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
409   }
410 #endif  // BUILDFLAG(USE_STARSCAN)
411   trials.emplace("PCScan_Effective", pcscan_group_name);
412   trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
413 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
414 
415 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
416   trials.emplace("DanglingPointerDetector", "Enabled");
417 #else
418   trials.emplace("DanglingPointerDetector", "Disabled");
419 #endif
420 
421   return trials;
422 }
423 
424 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
425 
426 namespace {
427 
428 internal::PartitionLock g_stack_trace_buffer_lock;
429 
430 struct DanglingPointerFreeInfo {
431   debug::StackTrace stack_trace;
432   debug::TaskTrace task_trace;
433   uintptr_t id = 0;
434 };
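// Fixed-capacity buffer of pending free-site traces, keyed by the dangling
// pointer id. Entries are recorded by DanglingRawPtrDetected() and consumed by
// TakeDanglingPointerFreeInfo(); when the buffer is full, new entries are
// silently dropped.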
435 using DanglingRawPtrBuffer =
436     std::array<absl::optional<DanglingPointerFreeInfo>, 32>;
437 DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
438 
439 void DanglingRawPtrDetected(uintptr_t id) {
440   // This is called from inside the allocator. No allocation is allowed.
441 
442   internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
443 
444 #if DCHECK_IS_ON()
445   for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
446     PA_DCHECK(!entry || entry->id != id);
447   }
448 #endif  // DCHECK_IS_ON()
449 
450   for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
451     if (!entry) {
452       entry = {debug::StackTrace(), debug::TaskTrace(), id};
453       return;
454     }
455   }
456 
457   // The StackTrace hasn't been recorded, because the buffer isn't large
458   // enough.
459 }
460 
461 // From the traces recorded in |DanglingRawPtrDetected|, extract the one
462 // whose id matches |id|. Returns nullopt if not found.
463 absl::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo(
464     uintptr_t id) {
465   internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
466   for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
467     if (entry && entry->id == id) {
468       absl::optional<DanglingPointerFreeInfo> result(entry);
469       entry = absl::nullopt;
470       return result;
471     }
472   }
473   return absl::nullopt;
474 }
475 
476 // Extracts, from the StackTrace output, the signature of the pertinent caller.
477 // This function is meant to be used only by Chromium developers, to list all
478 // the dangling raw_ptr occurrences in a table.
479 std::string ExtractDanglingPtrSignature(std::string stacktrace) {
480   std::vector<StringPiece> lines = SplitStringPiece(
481       stacktrace, "\r\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
482 
483   // We are looking for the callers of the function releasing the raw_ptr and
484   // freeing memory:
485   const StringPiece callees[] = {
486       // Common signatures
487       "internal::PartitionFree",
488       "base::(anonymous namespace)::FreeFn",
489 
490       // Linux signatures
491       "internal::RawPtrBackupRefImpl<>::ReleaseInternal()",
492       "base::RefCountedThreadSafe<>::Release()",
493 
494       // Windows signatures
495       "internal::RawPtrBackupRefImpl<0>::ReleaseInternal",
496       "_free_base",
497       // Windows stack traces are prefixed with "Backtrace:"
498       "Backtrace:",
499 
500       // Mac signatures
501       "internal::RawPtrBackupRefImpl<false>::ReleaseInternal",
502 
503       // Task traces are prefixed with "Task trace:" in
504       // |TaskTrace::OutputToStream|
505       "Task trace:",
506   };
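  // Take the line immediately following the last matching callee in the
  // output; if several callees match (e.g. a "Task trace:" header after the
  // stack trace), the last one wins.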
507   size_t caller_index = 0;
508   for (size_t i = 0; i < lines.size(); ++i) {
509     for (const auto& callee : callees) {
510       if (lines[i].find(callee) != StringPiece::npos) {
511         caller_index = i + 1;
512       }
513     }
514   }
515   if (caller_index >= lines.size()) {
516     return "no_callee_match";
517   }
518   StringPiece caller = lines[caller_index];
519 
520   if (caller.empty()) {
521     return "invalid_format";
522   }
523 
524   // On Posix platforms, |caller| has the following format:
525   //
526   // #<index> <address> <symbol>
527   //
528   // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
529   if (caller[0] == '#') {
530     const size_t address_start = caller.find(' ');
531     const size_t function_start = caller.find(' ', address_start + 1);
532 
533     if (address_start == caller.npos || function_start == caller.npos) {
534       return "invalid_format";
535     }
536 
537     return std::string(caller.substr(function_start + 1));
538   }
539 
540   // On Windows platforms, |caller| has the following format:
541   //
542   // \t<symbol> [0x<address>]+<displacement>(<filename>:<line>)
543   //
544   // See https://crsrc.org/c/base/debug/stack_trace_win.cc
545   if (caller[0] == '\t') {
546     const size_t symbol_start = 1;
547     const size_t symbol_end = caller.find(' ');
548     if (symbol_end == caller.npos) {
549       return "invalid_format";
550     }
551     return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
552   }
553 
554   // On Mac platforms, |caller| has the following format:
555   //
556   // <index> <library> 0x<address> <symbol> + <line>
557   //
558   // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
559   if (caller[0] >= '0' && caller[0] <= '9') {
560     const size_t address_start = caller.find("0x");
561     const size_t symbol_start = caller.find(' ', address_start + 1) + 1;
562     const size_t symbol_end = caller.find(' ', symbol_start);
563     if (symbol_start == caller.npos || symbol_end == caller.npos) {
564       return "invalid_format";
565     }
566     return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
567   }
568 
569   return "invalid_format";
570 }
571 
572 std::string ExtractDanglingPtrSignature(debug::TaskTrace task_trace) {
573   if (task_trace.empty()) {
574     return "No active task";
575   }
576   return ExtractDanglingPtrSignature(task_trace.ToString());
577 }
578 
579 std::string ExtractDanglingPtrSignature(
580     absl::optional<DanglingPointerFreeInfo> free_info,
581     debug::StackTrace release_stack_trace,
582     debug::TaskTrace release_task_trace) {
583   if (free_info) {
584     return StringPrintf(
585         "[DanglingSignature]\t%s\t%s\t%s\t%s",
586         ExtractDanglingPtrSignature(free_info->stack_trace.ToString()).c_str(),
587         ExtractDanglingPtrSignature(free_info->task_trace).c_str(),
588         ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
589         ExtractDanglingPtrSignature(release_task_trace).c_str());
590   }
591   return StringPrintf(
592       "[DanglingSignature]\t%s\t%s\t%s\t%s", "missing", "missing",
593       ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
594       ExtractDanglingPtrSignature(release_task_trace).c_str());
595 }
596 
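// Called when a raw_ptr<> pointing to an already-freed allocation is released.
// The template parameters select the reaction (crash vs. log-only) and whether
// only cross-task dangling pointers are reported.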
597 template <features::DanglingPtrMode dangling_pointer_mode,
598           features::DanglingPtrType dangling_pointer_type>
599 void DanglingRawPtrReleased(uintptr_t id) {
600   // This is called from raw_ptr<>'s release operation. Making allocations is
601   // allowed. In particular, symbolizing and printing the StackTraces may
602   // allocate memory.
603   debug::StackTrace stack_trace_release;
604   debug::TaskTrace task_trace_release;
605   absl::optional<DanglingPointerFreeInfo> free_info =
606       TakeDanglingPointerFreeInfo(id);
607 
608   if constexpr (dangling_pointer_type ==
609                 features::DanglingPtrType::kCrossTask) {
610     if (!free_info) {
611       return;
612     }
613     if (task_trace_release.ToString() == free_info->task_trace.ToString()) {
614       return;
615     }
616   }
617 
618   std::string dangling_signature = ExtractDanglingPtrSignature(
619       free_info, stack_trace_release, task_trace_release);
620   static const char dangling_ptr_footer[] =
621       "\n"
622       "\n"
623       "Please check for more information on:\n"
624       "https://chromium.googlesource.com/chromium/src/+/main/docs/"
625       "dangling_ptr_guide.md\n"
626       "\n"
627       "Googlers: Please give us your feedback about the dangling pointer\n"
628       "          detector at:\n"
629       "          http://go/dangling-ptr-cq-survey\n";
630   if (free_info) {
631     LOG(ERROR) << "Detected dangling raw_ptr with id="
632                << StringPrintf("0x%016" PRIxPTR, id) << ":\n"
633                << dangling_signature << "\n\n"
634                << "The memory was freed at:\n"
635                << free_info->stack_trace << "\n"
636                << free_info->task_trace << "\n"
637                << "The dangling raw_ptr was released at:\n"
638                << stack_trace_release << "\n"
639                << task_trace_release << dangling_ptr_footer;
640   } else {
641     LOG(ERROR) << "Detected dangling raw_ptr with id="
642                << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
643                << dangling_signature << "\n\n"
644                << "It was not recorded where the memory was freed.\n\n"
645                << "The dangling raw_ptr was released at:\n"
646                << stack_trace_release << "\n"
647                << task_trace_release << dangling_ptr_footer;
648   }
649 
650   if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
651     ImmediateCrash();
652   }
653 }
654 
655 void ClearDanglingRawPtrBuffer() {
656   internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
657   g_stack_trace_buffer = DanglingRawPtrBuffer();
658 }
659 
660 }  // namespace
661 
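// Installs the DanglingRawPtrDetected/Released hooks into PartitionAlloc. The
// reaction (crash vs. log-only) and the scope (all vs. cross-task) are picked
// from the kPartitionAllocDanglingPtr feature parameters; when the feature is
// disabled, no-op hooks are installed instead.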
662 void InstallDanglingRawPtrChecks() {
663   // Clearing storage is useful for running multiple unit tests without
664   // restarting the test executable.
665   ClearDanglingRawPtrBuffer();
666 
667   if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
668     partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
669     partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
670     return;
671   }
672 
673   partition_alloc::SetDanglingRawPtrDetectedFn(&DanglingRawPtrDetected);
674   switch (features::kDanglingPtrModeParam.Get()) {
675     case features::DanglingPtrMode::kCrash:
676       switch (features::kDanglingPtrTypeParam.Get()) {
677         case features::DanglingPtrType::kAll:
678           partition_alloc::SetDanglingRawPtrReleasedFn(
679               &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
680                                       features::DanglingPtrType::kAll>);
681           break;
682         case features::DanglingPtrType::kCrossTask:
683           partition_alloc::SetDanglingRawPtrReleasedFn(
684               &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
685                                       features::DanglingPtrType::kCrossTask>);
686           break;
687       }
688       break;
689     case features::DanglingPtrMode::kLogOnly:
690       switch (features::kDanglingPtrTypeParam.Get()) {
691         case features::DanglingPtrType::kAll:
692           partition_alloc::SetDanglingRawPtrReleasedFn(
693               &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
694                                       features::DanglingPtrType::kAll>);
695           break;
696         case features::DanglingPtrType::kCrossTask:
697           partition_alloc::SetDanglingRawPtrReleasedFn(
698               &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
699                                       features::DanglingPtrType::kCrossTask>);
700           break;
701       }
702       break;
703   }
704 }
705 
706 // TODO(arthursonzogni): There might exist long-lived dangling raw_ptr. If there
707 // is a dangling pointer, we should crash at some point. Consider providing an
708 // API to periodically check the buffer.
709 
710 #else   // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
711 void InstallDanglingRawPtrChecks() {}
712 #endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
713 
714 void UnretainedDanglingRawPtrDetectedDumpWithoutCrashing(uintptr_t id) {
715   PA_NO_CODE_FOLDING();
716   debug::DumpWithoutCrashing();
717 }
718 
719 void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
720   debug::TaskTrace task_trace;
721   debug::StackTrace stack_trace;
722   LOG(ERROR) << "Detected dangling raw_ptr in unretained with id="
723              << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
724              << task_trace << stack_trace;
725   ImmediateCrash();
726 }
727 
728 void InstallUnretainedDanglingRawPtrChecks() {
729   if (!FeatureList::IsEnabled(features::kPartitionAllocUnretainedDanglingPtr)) {
730     partition_alloc::SetUnretainedDanglingRawPtrDetectedFn([](uintptr_t) {});
731     partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/false);
732     return;
733   }
734 
735   partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/true);
736   switch (features::kUnretainedDanglingPtrModeParam.Get()) {
737     case features::UnretainedDanglingPtrMode::kCrash:
738       partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
739           &UnretainedDanglingRawPtrDetectedCrash);
740       break;
741 
742     case features::UnretainedDanglingPtrMode::kDumpWithoutCrashing:
743       partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
744           &UnretainedDanglingRawPtrDetectedDumpWithoutCrashing);
745       break;
746   }
747 }
748 
749 namespace {
750 
751 #if BUILDFLAG(USE_STARSCAN)
752 void SetProcessNameForPCScan(const std::string& process_type) {
753   const char* name = [&process_type] {
754     if (process_type.empty()) {
755       // Empty means browser process.
756       return "Browser";
757     }
758     if (process_type == switches::kRendererProcess) {
759       return "Renderer";
760     }
761     if (process_type == switches::kGpuProcess) {
762       return "Gpu";
763     }
764     if (process_type == switches::kUtilityProcess) {
765       return "Utility";
766     }
767     return static_cast<const char*>(nullptr);
768   }();
769 
770   if (name) {
771     partition_alloc::internal::PCScan::SetProcessName(name);
772   }
773 }
774 
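// The helpers below enable *Scan on the malloc() partitions when the
// corresponding feature is enabled, register the stats reporter, and return
// whether scanning was actually turned on.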
775 bool EnablePCScanForMallocPartitionsIfNeeded() {
776 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
777   using Config = partition_alloc::internal::PCScan::InitConfig;
778   DCHECK(base::FeatureList::GetInstance());
779   if (base::FeatureList::IsEnabled(base::features::kPartitionAllocPCScan)) {
780     allocator_shim::EnablePCScan({Config::WantedWriteProtectionMode::kEnabled,
781                                   Config::SafepointMode::kEnabled});
782     base::allocator::RegisterPCScanStatsReporter();
783     return true;
784   }
785 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
786   return false;
787 }
788 
789 bool EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded() {
790 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
791   using Config = partition_alloc::internal::PCScan::InitConfig;
792   DCHECK(base::FeatureList::GetInstance());
793   if (base::FeatureList::IsEnabled(
794           base::features::kPartitionAllocPCScanBrowserOnly)) {
795     const Config::WantedWriteProtectionMode wp_mode =
796         base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
797             ? Config::WantedWriteProtectionMode::kEnabled
798             : Config::WantedWriteProtectionMode::kDisabled;
799 #if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
800     CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
801         << "DCScan is currently only supported on Linux based systems";
802 #endif
803     allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kEnabled});
804     base::allocator::RegisterPCScanStatsReporter();
805     return true;
806   }
807 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
808   return false;
809 }
810 
811 bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
812 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
813   using Config = partition_alloc::internal::PCScan::InitConfig;
814   DCHECK(base::FeatureList::GetInstance());
815   if (base::FeatureList::IsEnabled(
816           base::features::kPartitionAllocPCScanRendererOnly)) {
817     const Config::WantedWriteProtectionMode wp_mode =
818         base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
819             ? Config::WantedWriteProtectionMode::kEnabled
820             : Config::WantedWriteProtectionMode::kDisabled;
821 #if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
822     CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
823         << "DCScan is currently only supported on Linux based systems";
824 #endif
825     allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kDisabled});
826     base::allocator::RegisterPCScanStatsReporter();
827     return true;
828   }
829 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
830   return false;
831 }
832 #endif  // BUILDFLAG(USE_STARSCAN)
833 
834 }  // namespace
835 
836 void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
837   DCHECK_NE(process_type, switches::kZygoteProcess);
838   // TODO(keishi): Move the code to enable BRP back here after Finch
839   // experiments.
840 }
841 
842 PartitionAllocSupport* PartitionAllocSupport::Get() {
843   static auto* singleton = new PartitionAllocSupport();
844   return singleton;
845 }
846 
847 PartitionAllocSupport::PartitionAllocSupport() = default;
848 
849 void PartitionAllocSupport::ReconfigureForTests() {
850   ReconfigureEarlyish("");
851   base::AutoLock scoped_lock(lock_);
852   called_for_tests_ = true;
853 }
854 
855 // static
856 PartitionAllocSupport::BrpConfiguration
857 PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
858   // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
859   CHECK(base::FeatureList::GetInstance());
860 
861   bool enable_brp = false;
862   bool enable_brp_zapping = false;
863   bool split_main_partition = false;
864   bool use_dedicated_aligned_partition = false;
865   bool add_dummy_ref_count = false;
866   bool process_affected_by_brp_flag = false;
867   bool enable_memory_reclaimer = false;
868 
869 #if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&  \
870      BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
871     BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
872   if (base::FeatureList::IsEnabled(
873           base::features::kPartitionAllocBackupRefPtr)) {
874     // No specified process type means this is the Browser process.
875     switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
876       case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
877         process_affected_by_brp_flag = process_type.empty();
878         break;
879       case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
880         process_affected_by_brp_flag =
881             process_type.empty() ||
882             (process_type == switches::kRendererProcess);
883         break;
884       case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
885         process_affected_by_brp_flag =
886             (process_type != switches::kRendererProcess);
887         break;
888       case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
889         process_affected_by_brp_flag = true;
890         break;
891     }
892   }
893 #endif  // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
894         // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
895         // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
896 
897 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
898     BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
899   if (process_affected_by_brp_flag) {
900     switch (base::features::kBackupRefPtrModeParam.Get()) {
901       case base::features::BackupRefPtrMode::kDisabled:
902         // Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
903         break;
904 
905       case base::features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
906         enable_memory_reclaimer = true;
907         ABSL_FALLTHROUGH_INTENDED;
908       case base::features::BackupRefPtrMode::kEnabled:
909         enable_brp_zapping = true;
910         ABSL_FALLTHROUGH_INTENDED;
911       case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
912         enable_brp = true;
913         split_main_partition = true;
914 #if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
915         // AlignedAlloc relies on natural alignment offered by the allocator
916         // (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
917         // in front of the allocation will mess up that alignment. Such extras
918         // are used when BackupRefPtr is on, in which case, we need a separate
919         // partition, dedicated to handle only aligned allocations, where those
920         // extras are disabled. However, if the "previous slot" variant is used,
921         // no dedicated partition is needed, as the extras won't interfere with
922         // the alignment requirements.
923         use_dedicated_aligned_partition = true;
924 #endif
925         break;
926 
927       case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
928         split_main_partition = true;
929         break;
930 
931       case base::features::BackupRefPtrMode::
932           kDisabledButSplitPartitions2WayWithMemoryReclaimer:
933         split_main_partition = true;
934         enable_memory_reclaimer = true;
935         break;
936 
937       case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
938         split_main_partition = true;
939         use_dedicated_aligned_partition = true;
940         break;
941 
942       case base::features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
943         split_main_partition = true;
944         add_dummy_ref_count = true;
945 #if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
946         use_dedicated_aligned_partition = true;
947 #endif
948         break;
949     }
950   }
951 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
952         // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
953 
954   return {enable_brp,
955           enable_brp_zapping,
956           enable_memory_reclaimer,
957           split_main_partition,
958           use_dedicated_aligned_partition,
959           add_dummy_ref_count,
960           process_affected_by_brp_flag};
961 }
962 
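// Earliest reconfiguration step. The later steps (ReconfigureAfterZygoteFork,
// ReconfigureAfterFeatureListInit, ReconfigureAfterTaskRunnerInit) check that
// they run after it and in the expected order.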
963 void PartitionAllocSupport::ReconfigureEarlyish(
964     const std::string& process_type) {
965   {
966     base::AutoLock scoped_lock(lock_);
967 
968     // In tests, ReconfigureEarlyish() is called by ReconfigureForTests(), which
969     // is earlier than ContentMain().
970     if (called_for_tests_) {
971       DCHECK(called_earlyish_);
972       return;
973     }
974 
975     // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
976     CHECK(!called_earlyish_)
977         << "ReconfigureEarlyish was already called for process '"
978         << established_process_type_ << "'; current process: '" << process_type
979         << "'";
980 
981     called_earlyish_ = true;
982     established_process_type_ = process_type;
983   }
984 
985   if (process_type != switches::kZygoteProcess) {
986     ReconfigurePartitionForKnownProcess(process_type);
987   }
988 
989   // These initializations are only relevant for PartitionAlloc-Everywhere
990   // builds.
991 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
992   allocator_shim::EnablePartitionAllocMemoryReclaimer();
993 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
994 }
995 
996 void PartitionAllocSupport::ReconfigureAfterZygoteFork(
997     const std::string& process_type) {
998   {
999     base::AutoLock scoped_lock(lock_);
1000     // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
1001     CHECK(!called_after_zygote_fork_)
1002         << "ReconfigureAfterZygoteFork was already called for process '"
1003         << established_process_type_ << "'; current process: '" << process_type
1004         << "'";
1005     DCHECK(called_earlyish_)
1006         << "Attempt to call ReconfigureAfterZygoteFork without calling "
1007            "ReconfigureEarlyish; current process: '"
1008         << process_type << "'";
1009     DCHECK_EQ(established_process_type_, switches::kZygoteProcess)
1010         << "Attempt to call ReconfigureAfterZygoteFork while "
1011            "ReconfigureEarlyish was called on non-zygote process '"
1012         << established_process_type_ << "'; current process: '" << process_type
1013         << "'";
1014 
1015     called_after_zygote_fork_ = true;
1016     established_process_type_ = process_type;
1017   }
1018 
1019   if (process_type != switches::kZygoteProcess) {
1020     ReconfigurePartitionForKnownProcess(process_type);
1021   }
1022 }
1023 
1024 void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
1025     const std::string& process_type,
1026     bool configure_dangling_pointer_detector) {
1027   if (configure_dangling_pointer_detector) {
1028     base::allocator::InstallDanglingRawPtrChecks();
1029   }
1030   base::allocator::InstallUnretainedDanglingRawPtrChecks();
1031   {
1032     base::AutoLock scoped_lock(lock_);
1033     // Avoid initializing more than once.
1034     // TODO(bartekn): See if this can be converted to a (D)CHECK.
1035     if (called_after_feature_list_init_) {
1036       DCHECK_EQ(established_process_type_, process_type)
1037           << "ReconfigureAfterFeatureListInit was already called for process '"
1038           << established_process_type_ << "'; current process: '"
1039           << process_type << "'";
1040       return;
1041     }
1042     DCHECK(called_earlyish_)
1043         << "Attempt to call ReconfigureAfterFeatureListInit without calling "
1044            "ReconfigureEarlyish; current process: '"
1045         << process_type << "'";
1046     DCHECK_NE(established_process_type_, switches::kZygoteProcess)
1047         << "Attempt to call ReconfigureAfterFeatureListInit without calling "
1048            "ReconfigureAfterZygoteFork; current process: '"
1049         << process_type << "'";
1050     DCHECK_EQ(established_process_type_, process_type)
1051         << "ReconfigureAfterFeatureListInit wasn't called for an already "
1052            "established process '"
1053         << established_process_type_ << "'; current process: '" << process_type
1054         << "'";
1055 
1056     called_after_feature_list_init_ = true;
1057   }
1058 
1059   DCHECK_NE(process_type, switches::kZygoteProcess);
1060   [[maybe_unused]] BrpConfiguration brp_config =
1061       GetBrpConfiguration(process_type);
1062 
1063 #if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
1064   if (brp_config.process_affected_by_brp_flag) {
1065     base::RawPtrAsanService::GetInstance().Configure(
1066         base::EnableDereferenceCheck(
1067             base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
1068         base::EnableExtractionCheck(
1069             base::features::kBackupRefPtrAsanEnableExtractionCheckParam.Get()),
1070         base::EnableInstantiationCheck(
1071             base::features::kBackupRefPtrAsanEnableInstantiationCheckParam
1072                 .Get()));
1073   } else {
1074     base::RawPtrAsanService::GetInstance().Configure(
1075         base::EnableDereferenceCheck(false), base::EnableExtractionCheck(false),
1076         base::EnableInstantiationCheck(false));
1077   }
1078 #endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
1079 
1080 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1081   // No specified type means we are in the browser.
1082   auto bucket_distribution =
1083       process_type == ""
1084           ? base::features::kPartitionAllocAlternateBucketDistributionParam
1085                 .Get()
1086           : base::features::AlternateBucketDistributionMode::kDefault;
1087 
1088   allocator_shim::ConfigurePartitions(
1089       allocator_shim::EnableBrp(brp_config.enable_brp),
1090       allocator_shim::EnableBrpZapping(brp_config.enable_brp_zapping),
1091       allocator_shim::EnableBrpPartitionMemoryReclaimer(
1092           brp_config.enable_brp_partition_memory_reclaimer),
1093       allocator_shim::SplitMainPartition(brp_config.split_main_partition),
1094       allocator_shim::UseDedicatedAlignedPartition(
1095           brp_config.use_dedicated_aligned_partition),
1096       allocator_shim::AddDummyRefCount(brp_config.add_dummy_ref_count),
1097       allocator_shim::AlternateBucketDistribution(bucket_distribution));
1098 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1099 
1100   // If BRP is not enabled, check whether any of the PCScan flags is enabled.
1101   [[maybe_unused]] bool scan_enabled = false;
1102 #if BUILDFLAG(USE_STARSCAN)
1103   if (!brp_config.enable_brp) {
1104     scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
1105     // No specified process type means this is the Browser process.
1106     if (process_type.empty()) {
1107       scan_enabled = scan_enabled ||
1108                      EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded();
1109     }
1110     if (process_type == switches::kRendererProcess) {
1111       scan_enabled = scan_enabled ||
1112                      EnablePCScanForMallocPartitionsInRendererProcessIfNeeded();
1113     }
1114     if (scan_enabled) {
1115       if (base::FeatureList::IsEnabled(
1116               base::features::kPartitionAllocPCScanStackScanning)) {
1117 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1118         partition_alloc::internal::PCScan::EnableStackScanning();
1119         // Notify PCScan about the main thread.
1120         partition_alloc::internal::PCScan::NotifyThreadCreated(
1121             partition_alloc::internal::GetStackTop());
1122 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1123       }
1124       if (base::FeatureList::IsEnabled(
1125               base::features::kPartitionAllocPCScanImmediateFreeing)) {
1126         partition_alloc::internal::PCScan::EnableImmediateFreeing();
1127       }
1128       if (base::FeatureList::IsEnabled(
1129               base::features::kPartitionAllocPCScanEagerClearing)) {
1130         partition_alloc::internal::PCScan::SetClearType(
1131             partition_alloc::internal::PCScan::ClearType::kEager);
1132       }
1133       SetProcessNameForPCScan(process_type);
1134     }
1135   }
1136 #endif  // BUILDFLAG(USE_STARSCAN)
1137 
1138 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1139 #if BUILDFLAG(USE_STARSCAN)
1140   // The non-quarantinable partition deals with hot V8 zone allocations.
1141   // In case PCScan is enabled in Renderer, enable thread cache on this
1142   // partition. At the same time, thread cache on the main(malloc) partition
1143   // must be disabled, because only one partition can have it on.
1144   if (scan_enabled && process_type == switches::kRendererProcess) {
1145     base::internal::NonQuarantinableAllocator::Instance()
1146         .root()
1147         ->EnableThreadCacheIfSupported();
1148   } else
1149 #endif  // BUILDFLAG(USE_STARSCAN)
1150   {
1151     allocator_shim::internal::PartitionAllocMalloc::Allocator()
1152         ->EnableThreadCacheIfSupported();
1153   }
1154 
1155   if (base::FeatureList::IsEnabled(
1156           base::features::kPartitionAllocLargeEmptySlotSpanRing)) {
1157     allocator_shim::internal::PartitionAllocMalloc::Allocator()
1158         ->EnableLargeEmptySlotSpanRing();
1159     allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator()
1160         ->EnableLargeEmptySlotSpanRing();
1161   }
1162 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1163 
1164 #if BUILDFLAG(IS_WIN)
1165   // Browser process only, since this is the one we want to prevent from
1166   // crashing the most (as it takes down all the tabs).
1167   if (base::FeatureList::IsEnabled(
1168           base::features::kPageAllocatorRetryOnCommitFailure) &&
1169       process_type.empty()) {
1170     partition_alloc::SetRetryOnCommitFailure(true);
1171   }
1172 #endif
1173 }
1174 
1175 void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
1176     const std::string& process_type) {
1177   {
1178     base::AutoLock scoped_lock(lock_);
1179 
1180     // Init only once.
1181     if (called_after_thread_pool_init_) {
1182       return;
1183     }
1184 
1185     DCHECK_EQ(established_process_type_, process_type);
1186     // Enforce ordering.
1187     DCHECK(called_earlyish_);
1188     DCHECK(called_after_feature_list_init_);
1189 
1190     called_after_thread_pool_init_ = true;
1191   }
1192 
1193 #if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
1194     BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1195   // This should be called in specific processes, as the main thread is
1196   // initialized later.
1197   DCHECK(process_type != switches::kZygoteProcess);
1198 
1199   base::allocator::StartThreadCachePeriodicPurge();
1200 
1201 #if BUILDFLAG(IS_ANDROID)
1202   // Lower thread cache limits to avoid stranding too much memory in the caches.
1203   if (base::SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled()) {
1204     ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1205         ::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
1206   }
1207 #endif  // BUILDFLAG(IS_ANDROID)
1208 
1209   // Renderer processes are more performance-sensitive, so increase thread cache
1210   // limits.
1211   if (process_type == switches::kRendererProcess &&
1212       base::FeatureList::IsEnabled(
1213           base::features::kPartitionAllocLargeThreadCacheSize)) {
1214     largest_cached_size_ =
1215         ::partition_alloc::ThreadCacheLimits::kLargeSizeThreshold;
1216 
1217 #if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
1218     // Devices almost always report less physical memory than what they actually
1219     // have, so anything above 3GiB will catch 4GiB and above.
1220     if (base::SysInfo::AmountOfPhysicalMemoryMB() <= 3500) {
1221       largest_cached_size_ =
1222           ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold;
1223     }
1224 #endif  // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
1225 
1226     ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
1227   }
1228 #endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
1229         // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1230 
1231 #if BUILDFLAG(USE_STARSCAN)
1232   if (base::FeatureList::IsEnabled(
1233           base::features::kPartitionAllocPCScanMUAwareScheduler)) {
1234     // Assign PCScan a task-based scheduling backend.
1235     static base::NoDestructor<
1236         partition_alloc::internal::MUAwareTaskBasedBackend>
1237         mu_aware_task_based_backend{
1238             partition_alloc::internal::PCScan::scheduler(),
1239             &partition_alloc::internal::PCScan::PerformDelayedScan};
1240     partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
1241         *mu_aware_task_based_backend.get());
1242   }
1243 #endif  // BUILDFLAG(USE_STARSCAN)
1244 
1245 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1246   base::allocator::StartMemoryReclaimer(
1247       base::SingleThreadTaskRunner::GetCurrentDefault());
1248 #endif
1249 
1250   if (base::FeatureList::IsEnabled(
1251           base::features::kPartitionAllocSortActiveSlotSpans)) {
1252     partition_alloc::PartitionRoot<
1253         partition_alloc::internal::ThreadSafe>::EnableSortActiveSlotSpans();
1254   }
1255 }
1256 
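// Raises the thread cache limit back to |largest_cached_size_| when a renderer
// returns to the foreground, unless kLowerPAMemoryLimitForNonMainRenderers
// applies; other process types return early.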
1257 void PartitionAllocSupport::OnForegrounded(bool has_main_frame) {
1258 #if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
1259     BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1260   {
1261     base::AutoLock scoped_lock(lock_);
1262     if (established_process_type_ != switches::kRendererProcess) {
1263       return;
1264     }
1265   }
1266 
1267   if (!base::FeatureList::IsEnabled(
1268           features::kLowerPAMemoryLimitForNonMainRenderers) ||
1269       has_main_frame) {
1270     ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
1271   }
1272 #endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
1273         // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1274 }
1275 
1276 void PartitionAllocSupport::OnBackgrounded() {
1277 #if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
1278     BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1279   {
1280     base::AutoLock scoped_lock(lock_);
1281     if (established_process_type_ != switches::kRendererProcess) {
1282       return;
1283     }
1284   }
1285 
1286   // Performance matters less for background renderers, so don't pay the
1287   // memory cost.
1288   ::partition_alloc::ThreadCache::SetLargestCachedSize(
1289       ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold);
1290 
1291   // In renderers, memory reclaim uses the "idle time" task runner to run
1292   // periodic reclaim. This does not always run when the renderer is idle, and
1293   // in particular after the renderer gets backgrounded. As a result, empty slot
1294   // spans are potentially never decommitted. To mitigate that, run a one-off
1295   // reclaim a few seconds later. Even if the renderer comes back to foreground
1296   // in the meantime, the worst case is a few more system calls.
1297   //
1298   // TODO(lizeb): Remove once/if the behavior of idle tasks changes.
1299   base::SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
1300       FROM_HERE, base::BindOnce([]() {
1301         ::partition_alloc::MemoryReclaimer::Instance()->ReclaimAll();
1302       }),
1303       base::Seconds(10));
1304 
1305 #endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
1306         // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
1307 }
1308 
1309 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
1310 std::string PartitionAllocSupport::ExtractDanglingPtrSignatureForTests(
1311     std::string stacktrace) {
1312   return ExtractDanglingPtrSignature(stacktrace);
1313 }
1314 #endif
1315 
1316 }  // namespace base::allocator
1317