1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/allocator/partition_alloc_support.h"
6
7 #include <array>
8 #include <cinttypes>
9 #include <cstdint>
10 #include <map>
11 #include <string>
12
13 #include "base/allocator/partition_alloc_features.h"
14 #include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
15 #include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
16 #include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
17 #include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
18 #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
19 #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
20 #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
21 #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
22 #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
23 #include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
24 #include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
25 #include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"
26 #include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
27 #include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
28 #include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
29 #include "base/at_exit.h"
30 #include "base/check.h"
31 #include "base/cpu.h"
32 #include "base/debug/dump_without_crashing.h"
33 #include "base/debug/stack_trace.h"
34 #include "base/debug/task_trace.h"
35 #include "base/feature_list.h"
36 #include "base/functional/bind.h"
37 #include "base/functional/callback.h"
38 #include "base/immediate_crash.h"
39 #include "base/location.h"
40 #include "base/memory/raw_ptr_asan_service.h"
41 #include "base/metrics/histogram_functions.h"
42 #include "base/metrics/histogram_macros.h"
43 #include "base/no_destructor.h"
44 #include "base/pending_task.h"
45 #include "base/strings/string_piece.h"
46 #include "base/strings/string_split.h"
47 #include "base/strings/stringprintf.h"
48 #include "base/system/sys_info.h"
49 #include "base/task/single_thread_task_runner.h"
50 #include "base/thread_annotations.h"
51 #include "base/threading/platform_thread.h"
52 #include "base/time/time.h"
53 #include "base/timer/timer.h"
54 #include "base/trace_event/base_tracing.h"
55 #include "build/build_config.h"
56 #include "third_party/abseil-cpp/absl/types/optional.h"
57
58 #if BUILDFLAG(USE_STARSCAN)
59 #include "base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h"
60 #include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
61 #include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h"
62 #include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
63 #include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h"
64 #include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_reporter.h"
65 #endif // BUILDFLAG(USE_STARSCAN)
66
67 #if BUILDFLAG(IS_ANDROID)
68 #include "base/system/sys_info.h"
69 #endif
70
71 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
72 #include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
73 #endif
74
75 namespace base::allocator {
76
77 namespace {
78
79 // When under this experiment avoid running periodic purging or reclaim for the
80 // first minute after the first attempt. This is based on the insight that
// processes often don't live past this minute.
// Kill switch for delaying the first periodic purge/reclaim task.
static BASE_FEATURE(kDelayFirstPeriodicPAPurgeOrReclaim,
                    "DelayFirstPeriodicPAPurgeOrReclaim",
                    base::FEATURE_ENABLED_BY_DEFAULT);
// Minimum delay applied to the first purge/reclaim task when the feature
// above is enabled.
constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay = base::Minutes(1);
86
87 // This is defined in content/public/common/content_switches.h, which is not
88 // accessible in ::base. They must be kept in sync.
namespace switches {
// Process-type switch values, duplicated from content_switches.h (see comment
// above).
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
constexpr char kZygoteProcess[] = "zygote";
#if BUILDFLAG(USE_STARSCAN)
// Only referenced by the StarScan process-naming code below.
constexpr char kGpuProcess[] = "gpu-process";
constexpr char kUtilityProcess[] = "utility";
#endif
}  // namespace switches
97
98 #if BUILDFLAG(USE_STARSCAN)
99
100 #if BUILDFLAG(ENABLE_BASE_TRACING)
ScannerIdToTracingString(partition_alloc::internal::StatsCollector::ScannerId id)101 constexpr const char* ScannerIdToTracingString(
102 partition_alloc::internal::StatsCollector::ScannerId id) {
103 switch (id) {
104 case partition_alloc::internal::StatsCollector::ScannerId::kClear:
105 return "PCScan.Scanner.Clear";
106 case partition_alloc::internal::StatsCollector::ScannerId::kScan:
107 return "PCScan.Scanner.Scan";
108 case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
109 return "PCScan.Scanner.Sweep";
110 case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
111 return "PCScan.Scanner";
112 case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
113 __builtin_unreachable();
114 }
115 }
116
MutatorIdToTracingString(partition_alloc::internal::StatsCollector::MutatorId id)117 constexpr const char* MutatorIdToTracingString(
118 partition_alloc::internal::StatsCollector::MutatorId id) {
119 switch (id) {
120 case partition_alloc::internal::StatsCollector::MutatorId::kClear:
121 return "PCScan.Mutator.Clear";
122 case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
123 return "PCScan.Mutator.ScanStack";
124 case partition_alloc::internal::StatsCollector::MutatorId::kScan:
125 return "PCScan.Mutator.Scan";
126 case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
127 return "PCScan.Mutator";
128 case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
129 __builtin_unreachable();
130 }
131 }
132 #endif // BUILDFLAG(ENABLE_BASE_TRACING)
133
134 // Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
135 class StatsReporterImpl final : public partition_alloc::StatsReporter {
136 public:
ReportTraceEvent(partition_alloc::internal::StatsCollector::ScannerId id,partition_alloc::internal::base::PlatformThreadId tid,int64_t start_time_ticks_internal_value,int64_t end_time_ticks_internal_value)137 void ReportTraceEvent(
138 partition_alloc::internal::StatsCollector::ScannerId id,
139 [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
140 int64_t start_time_ticks_internal_value,
141 int64_t end_time_ticks_internal_value) override {
142 #if BUILDFLAG(ENABLE_BASE_TRACING)
143 // TRACE_EVENT_* macros below drop most parameters when tracing is
144 // disabled at compile time.
145 const char* tracing_id = ScannerIdToTracingString(id);
146 const TimeTicks start_time =
147 TimeTicks::FromInternalValue(start_time_ticks_internal_value);
148 const TimeTicks end_time =
149 TimeTicks::FromInternalValue(end_time_ticks_internal_value);
150 TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
151 perfetto::ThreadTrack::ForThread(tid), start_time);
152 TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
153 end_time);
154 #endif // BUILDFLAG(ENABLE_BASE_TRACING)
155 }
156
ReportTraceEvent(partition_alloc::internal::StatsCollector::MutatorId id,partition_alloc::internal::base::PlatformThreadId tid,int64_t start_time_ticks_internal_value,int64_t end_time_ticks_internal_value)157 void ReportTraceEvent(
158 partition_alloc::internal::StatsCollector::MutatorId id,
159 [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
160 int64_t start_time_ticks_internal_value,
161 int64_t end_time_ticks_internal_value) override {
162 #if BUILDFLAG(ENABLE_BASE_TRACING)
163 // TRACE_EVENT_* macros below drop most parameters when tracing is
164 // disabled at compile time.
165 const char* tracing_id = MutatorIdToTracingString(id);
166 const TimeTicks start_time =
167 TimeTicks::FromInternalValue(start_time_ticks_internal_value);
168 const TimeTicks end_time =
169 TimeTicks::FromInternalValue(end_time_ticks_internal_value);
170 TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
171 perfetto::ThreadTrack::ForThread(tid), start_time);
172 TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
173 end_time);
174 #endif // BUILDFLAG(ENABLE_BASE_TRACING)
175 }
176
ReportSurvivedQuarantineSize(size_t survived_size)177 void ReportSurvivedQuarantineSize(size_t survived_size) override {
178 TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
179 survived_size);
180 }
181
ReportSurvivedQuarantinePercent(double survived_rate)182 void ReportSurvivedQuarantinePercent(double survived_rate) override {
183 // Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
184 // divide back.
185 // TODO(bikineev): Remove after switching to perfetto.
186 TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
187 1000 * survived_rate);
188 }
189
ReportStats(const char * stats_name,int64_t sample_in_usec)190 void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
191 TimeDelta sample = Microseconds(sample_in_usec);
192 UmaHistogramTimes(stats_name, sample);
193 }
194
195 private:
196 static constexpr char kTraceCategory[] = "partition_alloc";
197 };
198
199 #endif // BUILDFLAG(USE_STARSCAN)
200
201 } // namespace
202
203 #if BUILDFLAG(USE_STARSCAN)
RegisterPCScanStatsReporter()204 void RegisterPCScanStatsReporter() {
205 static StatsReporterImpl s_reporter;
206 static bool registered = false;
207
208 DCHECK(!registered);
209
210 partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
211 registered = true;
212 }
213 #endif // BUILDFLAG(USE_STARSCAN)
214
215 namespace {
216
RunThreadCachePeriodicPurge()217 void RunThreadCachePeriodicPurge() {
218 // Micros, since periodic purge should typically take at most a few ms.
219 SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
220 TRACE_EVENT0("memory", "PeriodicPurge");
221 auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
222 instance.RunPeriodicPurge();
223 TimeDelta delay =
224 Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
225 SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
226 FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
227 }
228
RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner)229 void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
230 TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
231 auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
232
233 {
234 // Micros, since memory reclaiming should typically take at most a few ms.
235 SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
236 instance->ReclaimNormal();
237 }
238
239 TimeDelta delay = features::kPartitionAllocMemoryReclaimerInterval.Get();
240 if (!delay.is_positive()) {
241 delay =
242 Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
243 }
244
245 task_runner->PostDelayedTask(
246 FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
247 }
248
249 } // namespace
250
StartThreadCachePeriodicPurge()251 void StartThreadCachePeriodicPurge() {
252 auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
253 TimeDelta delay =
254 Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
255
256 if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
257 delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
258 }
259
260 SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
261 FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
262 }
263
StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner)264 void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
265 if (!base::FeatureList::IsEnabled(
266 base::features::kPartitionAllocMemoryReclaimer)) {
267 return;
268 }
269
270 // Can be called several times.
271 static bool is_memory_reclaimer_running = false;
272 if (is_memory_reclaimer_running) {
273 return;
274 }
275 is_memory_reclaimer_running = true;
276
277 // The caller of the API fully controls where running the reclaim.
278 // However there are a few reasons to recommend that the caller runs
279 // it on the main thread:
280 // - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
281 // is more likely in cache when executing on the main thread.
282 // - Memory reclaim takes the partition lock for each partition. As a
283 // consequence, while reclaim is running, the main thread is unlikely to be
284 // able to make progress, as it would be waiting on the lock.
285 // - Finally, this runs in idle time only, so there should be no visible
286 // impact.
287 //
288 // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
289 // seconds is useful. Since this is meant to run during idle time only, it is
290 // a reasonable starting point balancing effectivenes vs cost. See
291 // crbug.com/942512 for details and experimental results.
292 TimeDelta delay = features::kPartitionAllocMemoryReclaimerInterval.Get();
293 if (!delay.is_positive()) {
294 delay = Microseconds(::partition_alloc::MemoryReclaimer::Instance()
295 ->GetRecommendedReclaimIntervalInMicroseconds());
296 }
297
298 if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
299 delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
300 }
301
302 task_runner->PostDelayedTask(
303 FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
304 }
305
ProposeSyntheticFinchTrials()306 std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
307 std::map<std::string, std::string> trials;
308
309 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
310 trials.emplace("DanglingPointerDetector", "Enabled");
311 #else
312 trials.emplace("DanglingPointerDetector", "Disabled");
313 #endif
314
315 // This value is not surrounded by build flags as it is meant to be updated
316 // manually in binary experiment patches.
317 trials.emplace("VectorRawPtrExperiment", "Disabled");
318
319 #if BUILDFLAG(FORCIBLY_ENABLE_BACKUP_REF_PTR_IN_ALL_PROCESSES)
320 trials.emplace(base::features::kRendererLiveBRPSyntheticTrialName, "Enabled");
321 #else
322 trials.emplace(base::features::kRendererLiveBRPSyntheticTrialName, "Control");
323 #endif
324
325 #if PA_CONFIG(HAS_MEMORY_TAGGING)
326 if (base::FeatureList::IsEnabled(
327 base::features::kPartitionAllocMemoryTagging)) {
328 if (base::CPU::GetInstanceNoAllocation().has_mte()) {
329 trials.emplace("MemoryTaggingDogfood", "Enabled");
330 } else {
331 trials.emplace("MemoryTaggingDogfood", "Disabled");
332 }
333 }
334 #endif
335
336 return trials;
337 }
338
339 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
340
341 namespace {
342
// Guards all accesses to |g_stack_trace_buffer|.
internal::PartitionLock g_stack_trace_buffer_lock;

// Record of where an allocation still referenced by a raw_ptr was freed.
struct DanglingPointerFreeInfo {
  // Stack of the free() call.
  debug::StackTrace stack_trace;
  // Task active when the free() happened.
  debug::TaskTrace task_trace;
  // Identifier of the allocation, as reported by PartitionAlloc.
  uintptr_t id = 0;
};
// Fixed-size buffer of pending free records; empty slots hold nullopt. When
// all 32 slots are in use, additional records are silently dropped (see
// DanglingRawPtrDetected).
using DanglingRawPtrBuffer =
    std::array<absl::optional<DanglingPointerFreeInfo>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
353
// Records the stack/task traces of the free() of allocation |id|, which is
// still referenced by a raw_ptr. The record is later consumed by
// |TakeDanglingPointerFreeInfo| when the raw_ptr is released.
void DanglingRawPtrDetected(uintptr_t id) {
  // This is called from inside the allocator. No allocation is allowed.

  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);

#if DCHECK_IS_ON()
  // Each allocation must be reported at most once; a duplicate |id| would
  // indicate a bug in the caller.
  for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
    PA_DCHECK(!entry || entry->id != id);
  }
#endif  // DCHECK_IS_ON()

  // Store the record in the first empty slot.
  for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
    if (!entry) {
      entry = {debug::StackTrace(), debug::TaskTrace(), id};
      return;
    }
  }

  // The StackTrace hasn't been recorded, because the buffer isn't large
  // enough.
}
375
376 // From the traces recorded in |DanglingRawPtrDetected|, extract the one
377 // whose id match |id|. Return nullopt if not found.
TakeDanglingPointerFreeInfo(uintptr_t id)378 absl::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo(
379 uintptr_t id) {
380 internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
381 for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
382 if (entry && entry->id == id) {
383 absl::optional<DanglingPointerFreeInfo> result(entry);
384 entry = absl::nullopt;
385 return result;
386 }
387 }
388 return absl::nullopt;
389 }
390
391 // Extract from the StackTrace output, the signature of the pertinent caller.
392 // This function is meant to be used only by Chromium developers, to list what
393 // are all the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
  std::vector<StringPiece> lines = SplitStringPiece(
      stacktrace, "\r\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);

  // We are looking for the callers of the function releasing the raw_ptr and
  // freeing memory:
  const StringPiece callees[] = {
      // Common signatures
      "internal::PartitionFree",
      "base::(anonymous namespace)::FreeFn",

      // Linux signatures
      "internal::RawPtrBackupRefImpl<>::ReleaseInternal()",
      "base::RefCountedThreadSafe<>::Release()",

      // Windows signatures
      "internal::RawPtrBackupRefImpl<0,0>::ReleaseInternal",
      "internal::RawPtrBackupRefImpl<0,1>::ReleaseInternal",
      "_free_base",

      // Mac signatures
      "internal::RawPtrBackupRefImpl<false, false>::ReleaseInternal",
      "internal::RawPtrBackupRefImpl<false, true>::ReleaseInternal",

      // ChromeOS signatures
      "base::allocator::dispatcher::internal::DispatcherImpl<>::FreeFn()",

      // Task traces are prefixed with "Task trace:" in
      // |TaskTrace::OutputToStream|
      "Task trace:",
  };
  // The line of interest is the one right after the *last* callee match.
  size_t caller_index = 0;
  for (size_t i = 0; i < lines.size(); ++i) {
    for (const auto& callee : callees) {
      if (lines[i].find(callee) != StringPiece::npos) {
        caller_index = i + 1;
      }
    }
  }
  // NOTE(review): if no callee matched, |caller_index| stays 0, so the first
  // line is parsed as the caller instead of returning "no_callee_match" —
  // confirm this fallback is intended.
  if (caller_index >= lines.size()) {
    return "no_callee_match";
  }
  StringPiece caller = lines[caller_index];

  if (caller.empty()) {
    return "invalid_format";
  }

  // On Posix platforms |callers| follows the following format:
  //
  //   #<index> <address> <symbol>
  //
  // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
  if (caller[0] == '#') {
    const size_t address_start = caller.find(' ');
    const size_t function_start = caller.find(' ', address_start + 1);

    if (address_start == caller.npos || function_start == caller.npos) {
      return "invalid_format";
    }

    // Everything after the second space is the symbol.
    return std::string(caller.substr(function_start + 1));
  }

  // On Windows platforms |callers| follows the following format:
  //
  //   \t<symbol> [0x<address>]+<displacement>(<filename>:<line>)
  //
  // See https://crsrc.org/c/base/debug/stack_trace_win.cc
  if (caller[0] == '\t') {
    const size_t symbol_start = 1;
    const size_t symbol_end = caller.find(' ');
    if (symbol_end == caller.npos) {
      return "invalid_format";
    }
    return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
  }

  // On Mac platforms |callers| follows the following format:
  //
  //   <index> <library> 0x<address> <symbol> + <line>
  //
  // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
  if (caller[0] >= '0' && caller[0] <= '9') {
    const size_t address_start = caller.find("0x");
    const size_t symbol_start = caller.find(' ', address_start + 1) + 1;
    const size_t symbol_end = caller.find(' ', symbol_start);
    if (symbol_start == caller.npos || symbol_end == caller.npos) {
      return "invalid_format";
    }
    return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
  }

  return "invalid_format";
}
489
ExtractDanglingPtrSignature(debug::TaskTrace task_trace)490 std::string ExtractDanglingPtrSignature(debug::TaskTrace task_trace) {
491 if (task_trace.empty()) {
492 return "No active task";
493 }
494 return ExtractDanglingPtrSignature(task_trace.ToString());
495 }
496
ExtractDanglingPtrSignature(absl::optional<DanglingPointerFreeInfo> free_info,debug::StackTrace release_stack_trace,debug::TaskTrace release_task_trace)497 std::string ExtractDanglingPtrSignature(
498 absl::optional<DanglingPointerFreeInfo> free_info,
499 debug::StackTrace release_stack_trace,
500 debug::TaskTrace release_task_trace) {
501 if (free_info) {
502 return StringPrintf(
503 "[DanglingSignature]\t%s\t%s\t%s\t%s",
504 ExtractDanglingPtrSignature(free_info->stack_trace.ToString()).c_str(),
505 ExtractDanglingPtrSignature(free_info->task_trace).c_str(),
506 ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
507 ExtractDanglingPtrSignature(release_task_trace).c_str());
508 }
509 return StringPrintf(
510 "[DanglingSignature]\t%s\t%s\t%s\t%s", "missing", "missing",
511 ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
512 ExtractDanglingPtrSignature(release_task_trace).c_str());
513 }
514
operator ==(const debug::TaskTrace & lhs,const debug::TaskTrace & rhs)515 bool operator==(const debug::TaskTrace& lhs, const debug::TaskTrace& rhs) {
516 // Compare the addresses contained in the task traces.
517 // The task traces are at most |PendingTask::kTaskBacktraceLength| long.
518 std::array<const void*, PendingTask::kTaskBacktraceLength> addresses_lhs = {};
519 std::array<const void*, PendingTask::kTaskBacktraceLength> addresses_rhs = {};
520 lhs.GetAddresses(addresses_lhs);
521 rhs.GetAddresses(addresses_rhs);
522 return addresses_lhs == addresses_rhs;
523 }
524
// Reports a dangling raw_ptr<> at the moment it is released. In kCrossTask
// mode, only pointers whose memory was freed in a different task are
// reported; in kCrash mode the process is terminated after logging.
template <features::DanglingPtrMode dangling_pointer_mode,
          features::DanglingPtrType dangling_pointer_type>
void DanglingRawPtrReleased(uintptr_t id) {
  // This is called from raw_ptr<>'s release operation. Making allocations is
  // allowed. In particular, symbolizing and printing the StackTraces may
  // allocate memory.
  debug::StackTrace stack_trace_release;
  debug::TaskTrace task_trace_release;
  absl::optional<DanglingPointerFreeInfo> free_info =
      TakeDanglingPointerFreeInfo(id);

  if constexpr (dangling_pointer_type ==
                features::DanglingPtrType::kCrossTask) {
    // Without a free record, the freeing task is unknown; skip the report.
    if (!free_info) {
      return;
    }
    // Freed and released within the same task: not a cross-task dangle.
    if (task_trace_release == free_info->task_trace) {
      return;
    }
  }

  std::string dangling_signature = ExtractDanglingPtrSignature(
      free_info, stack_trace_release, task_trace_release);
  static const char dangling_ptr_footer[] =
      "\n"
      "\n"
      "Please check for more information on:\n"
      "https://chromium.googlesource.com/chromium/src/+/main/docs/"
      "dangling_ptr_guide.md\n"
      "\n"
      "Googlers: Please give us your feedback about the dangling pointer\n"
      "          detector at:\n"
      "          http://go/dangling-ptr-cq-survey\n";
  if (free_info) {
    LOG(ERROR) << "Detected dangling raw_ptr with id="
               << StringPrintf("0x%016" PRIxPTR, id) << ":\n"
               << dangling_signature << "\n\n"
               << "The memory was freed at:\n"
               << free_info->stack_trace << "\n"
               << free_info->task_trace << "\n"
               << "The dangling raw_ptr was released at:\n"
               << stack_trace_release << "\n"
               << task_trace_release << dangling_ptr_footer;
  } else {
    // The free-side record was dropped (buffer full) or never captured.
    LOG(ERROR) << "Detected dangling raw_ptr with id="
               << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
               << dangling_signature << "\n\n"
               << "It was not recorded where the memory was freed.\n\n"
               << "The dangling raw_ptr was released at:\n"
               << stack_trace_release << "\n"
               << task_trace_release << dangling_ptr_footer;
  }

  if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
    ImmediateCrash();
  }
}
582
CheckDanglingRawPtrBufferEmpty()583 void CheckDanglingRawPtrBufferEmpty() {
584 internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
585
586 // TODO(https://crbug.com/1425095): Check for leaked refcount on Android.
587 #if BUILDFLAG(IS_ANDROID)
588 g_stack_trace_buffer = DanglingRawPtrBuffer();
589 #else
590 bool errors = false;
591 for (auto entry : g_stack_trace_buffer) {
592 if (!entry) {
593 continue;
594 }
595 errors = true;
596 LOG(ERROR) << "A freed allocation is still referenced by a dangling "
597 "pointer at exit, or at test end. Leaked raw_ptr/raw_ref "
598 "could cause PartitionAlloc's quarantine memory bloat."
599 "\n\n"
600 "Memory was released on:\n"
601 << entry->task_trace << "\n"
602 << entry->stack_trace << "\n";
603 }
604 CHECK(!errors);
605 #endif
606 }
607
608 } // namespace
609
// Installs the dangling raw_ptr detection hooks into PartitionAlloc,
// selecting the reporting behavior from the kPartitionAllocDanglingPtr
// feature and its mode/type parameters.
void InstallDanglingRawPtrChecks() {
  // Multiple tests can run within the same executable's execution. This line
  // ensures problems detected in the previous test raise an error before
  // entering the next one...
  CheckDanglingRawPtrBufferEmpty();

  // ... similarly, some allocation may stay forever in the quarantine and we
  // might ignore them if the executable exits. This line makes sure dangling
  // pointer errors are never ignored, by crashing at exit, as a last resort.
  // This makes quarantine memory bloat more likely to be detected.
  static bool first_run_in_process = true;
  if (first_run_in_process) {
    first_run_in_process = false;
    AtExitManager::RegisterTask(base::BindOnce(CheckDanglingRawPtrBufferEmpty));
  }

  // Feature disabled: install no-op hooks so detection has no effect.
  if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
    partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
    partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
    return;
  }

  partition_alloc::SetDanglingRawPtrDetectedFn(&DanglingRawPtrDetected);
  // Select the DanglingRawPtrReleased instantiation matching the configured
  // (mode, type) pair.
  switch (features::kDanglingPtrModeParam.Get()) {
    case features::DanglingPtrMode::kCrash:
      switch (features::kDanglingPtrTypeParam.Get()) {
        case features::DanglingPtrType::kAll:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
                                      features::DanglingPtrType::kAll>);
          break;
        case features::DanglingPtrType::kCrossTask:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
                                      features::DanglingPtrType::kCrossTask>);
          break;
      }
      break;
    case features::DanglingPtrMode::kLogOnly:
      switch (features::kDanglingPtrTypeParam.Get()) {
        case features::DanglingPtrType::kAll:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
                                      features::DanglingPtrType::kAll>);
          break;
        case features::DanglingPtrType::kCrossTask:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
                                      features::DanglingPtrType::kCrossTask>);
          break;
      }
      break;
  }
}
664
665 // TODO(arthursonzogni): There might exist long lived dangling raw_ptr. If there
666 // is a dangling pointer, we should crash at some point. Consider providing an
667 // API to periodically check the buffer.
668
669 #else // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// No-op stub used when dangling raw_ptr checks are compiled out.
void InstallDanglingRawPtrChecks() {}
671 #endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
672
// Reports an unretained dangling raw_ptr by uploading a crash dump while
// letting the process continue. |id| is unused; the signature matches the
// callback type expected by SetUnretainedDanglingRawPtrDetectedFn.
void UnretainedDanglingRawPtrDetectedDumpWithoutCrashing(uintptr_t id) {
  // Keeps this function from being code-folded with identical report
  // functions, which would make crash signatures ambiguous.
  PA_NO_CODE_FOLDING();
  debug::DumpWithoutCrashing();
}
677
// Logs the current task and stack traces for the unretained dangling raw_ptr
// identified by |id|, then terminates the process immediately.
void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
  debug::TaskTrace task_trace;
  debug::StackTrace stack_trace;
  LOG(ERROR) << "Detected dangling raw_ptr in unretained with id="
             << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
             << task_trace << stack_trace;
  ImmediateCrash();
}
686
InstallUnretainedDanglingRawPtrChecks()687 void InstallUnretainedDanglingRawPtrChecks() {
688 if (!FeatureList::IsEnabled(features::kPartitionAllocUnretainedDanglingPtr)) {
689 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn([](uintptr_t) {});
690 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/false);
691 return;
692 }
693
694 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/true);
695 switch (features::kUnretainedDanglingPtrModeParam.Get()) {
696 case features::UnretainedDanglingPtrMode::kCrash:
697 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
698 &UnretainedDanglingRawPtrDetectedCrash);
699 break;
700
701 case features::UnretainedDanglingPtrMode::kDumpWithoutCrashing:
702 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
703 &UnretainedDanglingRawPtrDetectedDumpWithoutCrashing);
704 break;
705 }
706 }
707
708 namespace {
709
710 #if BUILDFLAG(USE_STARSCAN)
SetProcessNameForPCScan(const std::string & process_type)711 void SetProcessNameForPCScan(const std::string& process_type) {
712 const char* name = [&process_type] {
713 if (process_type.empty()) {
714 // Empty means browser process.
715 return "Browser";
716 }
717 if (process_type == switches::kRendererProcess) {
718 return "Renderer";
719 }
720 if (process_type == switches::kGpuProcess) {
721 return "Gpu";
722 }
723 if (process_type == switches::kUtilityProcess) {
724 return "Utility";
725 }
726 return static_cast<const char*>(nullptr);
727 }();
728
729 if (name) {
730 partition_alloc::internal::PCScan::SetProcessName(name);
731 }
732 }
733
// Enables PCScan on the malloc() partitions in any process when the
// kPartitionAllocPCScan feature is on. Returns whether PCScan was enabled.
// Only effective when PartitionAlloc backs malloc().
bool EnablePCScanForMallocPartitionsIfNeeded() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Let PartitionAlloc's internal threads be named through //base.
  partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
      &base::PlatformThread::SetName);

  using Config = partition_alloc::internal::PCScan::InitConfig;
  // Features must be initialized before querying them.
  DCHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(base::features::kPartitionAllocPCScan)) {
    allocator_shim::EnablePCScan({Config::WantedWriteProtectionMode::kEnabled,
                                  Config::SafepointMode::kEnabled});
    base::allocator::RegisterPCScanStatsReporter();
    return true;
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return false;
}
750
// Enables PCScan on the malloc() partitions in the browser process only, when
// kPartitionAllocPCScanBrowserOnly is on. The write-protection (DCScan)
// variant additionally requires kPartitionAllocDCScan and platform support
// for the userfaultfd-based write protector. Returns whether PCScan was
// enabled.
bool EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  using Config = partition_alloc::internal::PCScan::InitConfig;
  // Features must be initialized before querying them.
  DCHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocPCScanBrowserOnly)) {
    const Config::WantedWriteProtectionMode wp_mode =
        base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
            ? Config::WantedWriteProtectionMode::kEnabled
            : Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
    CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
        << "DCScan is currently only supported on Linux based systems";
#endif
    // The browser process uses safepoints, unlike the renderer (see below).
    allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kEnabled});
    base::allocator::RegisterPCScanStatsReporter();
    return true;
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return false;
}
772
// Enables PCScan on the malloc() partitions in renderer processes only, when
// kPartitionAllocPCScanRendererOnly is on. Identical to the browser-process
// variant above except that safepoints are disabled. Returns whether PCScan
// was enabled.
bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  using Config = partition_alloc::internal::PCScan::InitConfig;
  // Features must be initialized before querying them.
  DCHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocPCScanRendererOnly)) {
    const Config::WantedWriteProtectionMode wp_mode =
        base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
            ? Config::WantedWriteProtectionMode::kEnabled
            : Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
    CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
        << "DCScan is currently only supported on Linux based systems";
#endif
    allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kDisabled});
    base::allocator::RegisterPCScanStatsReporter();
    return true;
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return false;
}
794 #endif // BUILDFLAG(USE_STARSCAN)
795
796 } // namespace
797
// Hook for process-type-specific partition configuration, invoked once the
// process type is known (never for the zygote, which forks into a concrete
// type later). Currently intentionally empty — see TODO below.
void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
  DCHECK_NE(process_type, switches::kZygoteProcess);
  // TODO(keishi): Move the code to enable BRP back here after Finch
  // experiments.
}
803
Get()804 PartitionAllocSupport* PartitionAllocSupport::Get() {
805 static auto* singleton = new PartitionAllocSupport();
806 return singleton;
807 }
808
// Trivial constructor; all configuration happens later via Reconfigure*().
PartitionAllocSupport::PartitionAllocSupport() = default;
810
// Test-only entry point: runs the "earlyish" reconfiguration with an empty
// (browser) process type, then marks the test path as taken so that a later
// ReconfigureEarlyish() from production startup becomes a no-op.
void PartitionAllocSupport::ReconfigureForTests() {
  ReconfigureEarlyish("");
  base::AutoLock scoped_lock(lock_);
  called_for_tests_ = true;
}
816
817 // static
ShouldEnableMemoryTagging(const std::string & process_type)818 bool PartitionAllocSupport::ShouldEnableMemoryTagging(
819 const std::string& process_type) {
820 // Check kPartitionAllocMemoryTagging first so the Feature is activated even
821 // when mte bootloader flag is disabled.
822 if (!base::FeatureList::IsEnabled(
823 base::features::kPartitionAllocMemoryTagging)) {
824 return false;
825 }
826 if (!base::CPU::GetInstanceNoAllocation().has_mte()) {
827 return false;
828 }
829
830 DCHECK(base::FeatureList::GetInstance());
831 if (base::FeatureList::IsEnabled(
832 base::features::kKillPartitionAllocMemoryTagging)) {
833 return false;
834 }
835 switch (base::features::kMemoryTaggingEnabledProcessesParam.Get()) {
836 case base::features::MemoryTaggingEnabledProcesses::kBrowserOnly:
837 return process_type.empty();
838 case base::features::MemoryTaggingEnabledProcesses::kNonRenderer:
839 return process_type != switches::kRendererProcess;
840 case base::features::MemoryTaggingEnabledProcesses::kAllProcesses:
841 return true;
842 }
843 }
844
// static
// Convenience wrapper: answers ShouldEnableMemoryTagging() for the renderer
// process type specifically.
bool PartitionAllocSupport::ShouldEnableMemoryTaggingInRendererProcess() {
  return ShouldEnableMemoryTagging(switches::kRendererProcess);
}
849
// static
// Computes the BackupRefPtr (BRP) configuration for |process_type| from build
// flags and base::Features. An empty |process_type| denotes the browser
// process. When the relevant build flags are off, returns the all-disabled
// defaults initialized below.
PartitionAllocSupport::BrpConfiguration
PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
  // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
  CHECK(base::FeatureList::GetInstance());

  bool enable_brp = false;
  bool enable_brp_for_ash = false;
  bool split_main_partition = false;
  bool use_dedicated_aligned_partition = false;
  bool process_affected_by_brp_flag = false;
  size_t ref_count_size = 0;  // 0 means "natural" size (see switch below).

// Step 1: decide whether the BRP feature flag applies to this process at all.
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
     BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
    BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#if BUILDFLAG(FORCIBLY_ENABLE_BACKUP_REF_PTR_IN_ALL_PROCESSES)
  process_affected_by_brp_flag = true;
#else
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocBackupRefPtr)) {
    // No specified process type means this is the Browser process.
    switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
      case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
        process_affected_by_brp_flag = process_type.empty();
        break;
      case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
        process_affected_by_brp_flag =
            process_type.empty() ||
            (process_type == switches::kRendererProcess);
        break;
      case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
        process_affected_by_brp_flag =
            (process_type != switches::kRendererProcess);
        break;
      case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
        process_affected_by_brp_flag = true;
        break;
    }
  }
#endif  // BUILDFLAG(FORCIBLY_ENABLE_BACKUP_REF_PTR_IN_ALL_PROCESSES)
#endif  // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
        // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)

// Step 2: for affected processes, translate the BRP mode param into the
// enable/split/aligned-partition knobs, and pick the ref-count size.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (process_affected_by_brp_flag) {
    switch (base::features::kBackupRefPtrModeParam.Get()) {
      case base::features::BackupRefPtrMode::kDisabled:
        // Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
        break;

      case base::features::BackupRefPtrMode::kEnabled:
        enable_brp = true;
        split_main_partition = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
        // AlignedAlloc relies on natural alignment offered by the allocator
        // (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
        // in front of the allocation will mess up that alignment. Such extras
        // are used when BackupRefPtr is on, in which case, we need a separate
        // partition, dedicated to handle only aligned allocations, where those
        // extras are disabled. However, if the "previous slot" variant is used,
        // no dedicated partition is needed, as the extras won't interfere with
        // the alignment requirements.
        use_dedicated_aligned_partition = true;
#endif
        break;

      case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
        split_main_partition = true;
        break;

      case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
        split_main_partition = true;
        use_dedicated_aligned_partition = true;
        break;
    }

    if (enable_brp) {
      // Ref-count size override for experiments; 0 keeps the natural size.
      switch (base::features::kBackupRefPtrRefCountSizeParam.Get()) {
        case base::features::BackupRefPtrRefCountSize::kNatural:
          ref_count_size = 0;
          break;
        case base::features::BackupRefPtrRefCountSize::k4B:
          ref_count_size = 4;
          break;
        case base::features::BackupRefPtrRefCountSize::k8B:
          ref_count_size = 8;
          break;
        case base::features::BackupRefPtrRefCountSize::k16B:
          ref_count_size = 16;
          break;
      }
    }
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

  // Enabling BRP for Ash makes sense only when BRP is enabled. If it wasn't,
  // there would be no BRP pool, thus BRP would be equally inactive for Ash
  // pointers.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  enable_brp_for_ash =
      enable_brp && base::FeatureList::IsEnabled(
                        base::features::kPartitionAllocBackupRefPtrForAsh);
#endif

  // Aggregate-initialized; field order must match the BrpConfiguration
  // declaration in the header.
  return {
      enable_brp,
      enable_brp_for_ash,
      split_main_partition,
      use_dedicated_aligned_partition,
      process_affected_by_brp_flag,
      ref_count_size,
  };
}
967
// First reconfiguration step, run early in process startup (before the
// FeatureList-dependent step below). Records |process_type| as the
// established type, enforces single-call semantics, and — in
// PartitionAlloc-Everywhere builds — enables the memory reclaimer.
void PartitionAllocSupport::ReconfigureEarlyish(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);

    // In tests, ReconfigureEarlyish() is called by ReconfigureForTest(), which
    // is earlier than ContentMain().
    if (called_for_tests_) {
      DCHECK(called_earlyish_);
      return;
    }

    // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
    CHECK(!called_earlyish_)
        << "ReconfigureEarlyish was already called for process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_earlyish_ = true;
    established_process_type_ = process_type;
  }

  // The zygote's type is not final; it gets reconfigured after fork instead.
  if (process_type != switches::kZygoteProcess) {
    ReconfigurePartitionForKnownProcess(process_type);
  }

  // These initializations are only relevant for PartitionAlloc-Everywhere
  // builds.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  allocator_shim::EnablePartitionAllocMemoryReclaimer();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
1000
// Called in a child process right after it is forked from the zygote.
// Transitions the established process type from zygote to |process_type| and
// applies the process-specific partition configuration. Must follow
// ReconfigureEarlyish() (which must have seen the zygote type).
void PartitionAllocSupport::ReconfigureAfterZygoteFork(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);
    // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
    CHECK(!called_after_zygote_fork_)
        << "ReconfigureAfterZygoteFork was already called for process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";
    DCHECK(called_earlyish_)
        << "Attempt to call ReconfigureAfterZygoteFork without calling "
           "ReconfigureEarlyish; current process: '"
        << process_type << "'";
    DCHECK_EQ(established_process_type_, switches::kZygoteProcess)
        << "Attempt to call ReconfigureAfterZygoteFork while "
           "ReconfigureEarlyish was called on non-zygote process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_after_zygote_fork_ = true;
    established_process_type_ = process_type;
  }

  if (process_type != switches::kZygoteProcess) {
    ReconfigurePartitionForKnownProcess(process_type);
  }
}
1028
// Main feature-driven reconfiguration step, run once the FeatureList is
// available. Installs dangling-raw_ptr checks, computes the BRP
// configuration, decides memory tagging (MTE) mode, configures the malloc()
// partitions, and falls back to PCScan when BRP is off. Must follow
// ReconfigureEarlyish() (and ReconfigureAfterZygoteFork() for zygote
// children).
void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
    const std::string& process_type,
    bool configure_dangling_pointer_detector) {
  if (configure_dangling_pointer_detector) {
    base::allocator::InstallDanglingRawPtrChecks();
  }
  base::allocator::InstallUnretainedDanglingRawPtrChecks();
  {
    base::AutoLock scoped_lock(lock_);
    // Avoid initializing more than once.
    // TODO(bartekn): See if can be converted to (D)CHECK.
    if (called_after_feature_list_init_) {
      DCHECK_EQ(established_process_type_, process_type)
          << "ReconfigureAfterFeatureListInit was already called for process '"
          << established_process_type_ << "'; current process: '"
          << process_type << "'";
      return;
    }
    DCHECK(called_earlyish_)
        << "Attempt to call ReconfigureAfterFeatureListInit without calling "
           "ReconfigureEarlyish; current process: '"
        << process_type << "'";
    DCHECK_NE(established_process_type_, switches::kZygoteProcess)
        << "Attempt to call ReconfigureAfterFeatureListInit without calling "
           "ReconfigureAfterZygoteFork; current process: '"
        << process_type << "'";
    DCHECK_EQ(established_process_type_, process_type)
        << "ReconfigureAfterFeatureListInit wasn't called for an already "
           "established process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_after_feature_list_init_ = true;
  }

  DCHECK_NE(process_type, switches::kZygoteProcess);
  // [[maybe_unused]] because in some build configurations only a subset of
  // the #if sections below reads it.
  [[maybe_unused]] BrpConfiguration brp_config =
      GetBrpConfiguration(process_type);

  if (brp_config.enable_brp_for_ash) {
    // This must be enabled before the BRP partition is created. See
    // RawPtrBackupRefImpl::UseBrp().
    base::RawPtrGlobalSettings::EnableExperimentalAsh();
  }

#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
  // ASan-based BRP: turn the individual checks on only for affected
  // processes; explicitly configure them all off otherwise.
  if (brp_config.process_affected_by_brp_flag) {
    base::RawPtrAsanService::GetInstance().Configure(
        base::EnableDereferenceCheck(
            base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
        base::EnableExtractionCheck(
            base::features::kBackupRefPtrAsanEnableExtractionCheckParam.Get()),
        base::EnableInstantiationCheck(
            base::features::kBackupRefPtrAsanEnableInstantiationCheckParam
                .Get()));
  } else {
    base::RawPtrAsanService::GetInstance().Configure(
        base::EnableDereferenceCheck(false), base::EnableExtractionCheck(false),
        base::EnableInstantiationCheck(false));
  }
#endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Bucket distribution experiment applies to the browser process only;
  // other processes keep the default (kNeutral).
  auto bucket_distribution = allocator_shim::BucketDistribution::kNeutral;
  // No specified type means we are in the browser.
  switch (process_type == ""
              ? base::features::kPartitionAllocBucketDistributionParam.Get()
              : base::features::BucketDistributionMode::kDefault) {
    case base::features::BucketDistributionMode::kDefault:
      break;
    case base::features::BucketDistributionMode::kDenser:
      bucket_distribution = allocator_shim::BucketDistribution::kDenser;
      break;
  }

  const size_t scheduler_loop_quarantine_capacity_in_bytes =
      static_cast<size_t>(
          base::features::kPartitionAllocSchedulerLoopQuarantineCapacity.Get());
  const bool zapping_by_free_flags = base::FeatureList::IsEnabled(
      base::features::kPartitionAllocZappingByFreeFlags);

  bool enable_memory_tagging = false;
  partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode =
      partition_alloc::TagViolationReportingMode::kUndefined;

#if PA_CONFIG(HAS_MEMORY_TAGGING)
  // ShouldEnableMemoryTagging() checks kKillPartitionAllocMemoryTagging but
  // check here too to wrap the GetMemoryTaggingModeForCurrentThread() call.
  if (!base::FeatureList::IsEnabled(
          base::features::kKillPartitionAllocMemoryTagging)) {
    // If synchronous mode is enabled from startup it means this is a test and
    // memory tagging should be enabled.
    if (partition_alloc::internal::GetMemoryTaggingModeForCurrentThread() ==
        partition_alloc::TagViolationReportingMode::kSynchronous) {
      enable_memory_tagging = true;
      memory_tagging_reporting_mode =
          partition_alloc::TagViolationReportingMode::kSynchronous;
    } else {
      enable_memory_tagging = ShouldEnableMemoryTagging(process_type);
#if BUILDFLAG(IS_ANDROID)
      if (enable_memory_tagging) {
        // Pick sync vs. async reporting per the feature param, then apply it
        // to all threads and verify it stuck.
        switch (base::features::kMemtagModeParam.Get()) {
          case base::features::MemtagMode::kSync:
            memory_tagging_reporting_mode =
                partition_alloc::TagViolationReportingMode::kSynchronous;
            break;
          case base::features::MemtagMode::kAsync:
            memory_tagging_reporting_mode =
                partition_alloc::TagViolationReportingMode::kAsynchronous;
            break;
        }
        partition_alloc::PermissiveMte::SetEnabled(base::FeatureList::IsEnabled(
            base::features::kPartitionAllocPermissiveMte));
        partition_alloc::internal::
            ChangeMemoryTaggingModeForAllThreadsPerProcess(
                memory_tagging_reporting_mode);
        CHECK_EQ(
            partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
            memory_tagging_reporting_mode);
      } else if (base::CPU::GetInstanceNoAllocation().has_mte()) {
        // MTE-capable hardware but tagging is off for this process:
        // explicitly disable it process-wide.
        memory_tagging_reporting_mode =
            partition_alloc::TagViolationReportingMode::kDisabled;
        partition_alloc::internal::
            ChangeMemoryTaggingModeForAllThreadsPerProcess(
                memory_tagging_reporting_mode);
        CHECK_EQ(
            partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
            memory_tagging_reporting_mode);
      }
#endif  // BUILDFLAG(IS_ANDROID)
    }
  }
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)

  // Sanity-check the (enable, reporting mode) combination before handing it
  // to the allocator shim.
  if (enable_memory_tagging) {
    CHECK((memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kSynchronous) ||
          (memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kAsynchronous));
  } else {
    CHECK((memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kUndefined) ||
          (memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kDisabled));
  }

  allocator_shim::ConfigurePartitions(
      allocator_shim::EnableBrp(brp_config.enable_brp),
      allocator_shim::EnableMemoryTagging(enable_memory_tagging),
      memory_tagging_reporting_mode,
      allocator_shim::SplitMainPartition(brp_config.split_main_partition ||
                                         enable_memory_tagging),
      allocator_shim::UseDedicatedAlignedPartition(
          brp_config.use_dedicated_aligned_partition),
      brp_config.ref_count_size, bucket_distribution,
      scheduler_loop_quarantine_capacity_in_bytes,
      allocator_shim::ZappingByFreeFlags(zapping_by_free_flags));

  const uint32_t extras_size = allocator_shim::GetMainPartitionRootExtrasSize();
  // As per description, extras are optional and are expected not to
  // exceed (cookie + max(BRP ref-count)) == 16 + 16 == 32 bytes.
  // 100 is a reasonable cap for this value.
  UmaHistogramCounts100("Memory.PartitionAlloc.PartitionRoot.ExtrasSize",
                        int(extras_size));
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  // If BRP is not enabled, check if any of PCScan flags is enabled.
  [[maybe_unused]] bool scan_enabled = false;
#if BUILDFLAG(USE_STARSCAN)
  if (!brp_config.enable_brp) {
    scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
    // No specified process type means this is the Browser process.
    if (process_type.empty()) {
      scan_enabled = scan_enabled ||
                     EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded();
    }
    if (process_type == switches::kRendererProcess) {
      scan_enabled = scan_enabled ||
                     EnablePCScanForMallocPartitionsInRendererProcessIfNeeded();
    }
    if (scan_enabled) {
      // Apply PCScan tuning flags only once PCScan is actually on.
      if (base::FeatureList::IsEnabled(
              base::features::kPartitionAllocPCScanStackScanning)) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        partition_alloc::internal::PCScan::EnableStackScanning();
        // Notify PCScan about the main thread.
        partition_alloc::internal::PCScan::NotifyThreadCreated(
            partition_alloc::internal::GetStackTop());
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
      }
      if (base::FeatureList::IsEnabled(
              base::features::kPartitionAllocPCScanImmediateFreeing)) {
        partition_alloc::internal::PCScan::EnableImmediateFreeing();
      }
      if (base::FeatureList::IsEnabled(
              base::features::kPartitionAllocPCScanEagerClearing)) {
        partition_alloc::internal::PCScan::SetClearType(
            partition_alloc::internal::PCScan::ClearType::kEager);
      }
      SetProcessNameForPCScan(process_type);
    }
  }
#endif  // BUILDFLAG(USE_STARSCAN)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_STARSCAN)
  // Non-quarantinable partition is dealing with hot V8's zone allocations.
  // In case PCScan is enabled in Renderer, enable thread cache on this
  // partition. At the same time, thread cache on the main(malloc) partition
  // must be disabled, because only one partition can have it on.
  if (scan_enabled && process_type == switches::kRendererProcess) {
    allocator_shim::NonQuarantinableAllocator::Instance()
        .root()
        ->EnableThreadCacheIfSupported();
  } else
#endif  // BUILDFLAG(USE_STARSCAN)
  {
    allocator_shim::internal::PartitionAllocMalloc::Allocator()
        ->EnableThreadCacheIfSupported();
  }

  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocLargeEmptySlotSpanRing)) {
    allocator_shim::internal::PartitionAllocMalloc::Allocator()
        ->EnableLargeEmptySlotSpanRing();
    allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator()
        ->EnableLargeEmptySlotSpanRing();
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if BUILDFLAG(IS_WIN)
  // Browser process only, since this is the one we want to prevent from
  // crashing the most (as it takes down all the tabs).
  if (base::FeatureList::IsEnabled(
          base::features::kPageAllocatorRetryOnCommitFailure) &&
      process_type.empty()) {
    partition_alloc::SetRetryOnCommitFailure(true);
  }
#endif
}
1269
// Final reconfiguration step, run once task runners exist: starts periodic
// thread-cache purging and memory reclaim, applies thread-cache sizing
// experiments, optionally installs a task-based PCScan scheduling backend,
// and sets free-list tuning modes. Must follow the two earlier Reconfigure*
// steps.
void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);

    // Init only once.
    if (called_after_thread_pool_init_) {
      return;
    }

    DCHECK_EQ(established_process_type_, process_type);
    // Enforce ordering.
    DCHECK(called_earlyish_);
    DCHECK(called_after_feature_list_init_);

    called_after_thread_pool_init_ = true;
  }

#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // This should be called in specific processes, as the main thread is
  // initialized later.
  DCHECK(process_type != switches::kZygoteProcess);

  partition_alloc::ThreadCacheRegistry::Instance().SetPurgingConfiguration(
      base::features::GetThreadCacheMinPurgeInterval(),
      base::features::GetThreadCacheMaxPurgeInterval(),
      base::features::GetThreadCacheDefaultPurgeInterval(),
      size_t(base::features::GetThreadCacheMinCachedMemoryForPurgingBytes()));

  base::allocator::StartThreadCachePeriodicPurge();

  if (base::FeatureList::IsEnabled(
          base::features::kEnableConfigurableThreadCacheMultiplier)) {
    // If kEnableConfigurableThreadCacheMultiplier is enabled, override the
    // multiplier value with the corresponding feature param.
#if BUILDFLAG(IS_ANDROID)
    ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
        base::features::GetThreadCacheMultiplierForAndroid());
#else   // BUILDFLAG(IS_ANDROID)
    ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
        base::features::GetThreadCacheMultiplier());
#endif  // BUILDFLAG(IS_ANDROID)
  } else {
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
    // If kEnableConfigurableThreadCacheMultiplier is not enabled, lower
    // thread cache limits on Android low end device to avoid stranding too much
    // memory in the caches.
    if (SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
            features::kPartialLowEndModeExcludePartitionAllocSupport)) {
      ::partition_alloc::ThreadCacheRegistry::Instance()
          .SetThreadCacheMultiplier(
              ::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
    }
#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
  }

  // Renderer processes are more performance-sensitive, increase thread cache
  // limits.
  if (process_type == switches::kRendererProcess &&
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocLargeThreadCacheSize)) {
    // Remembered in largest_cached_size_ so OnForegrounded() can restore it
    // after OnBackgrounded() shrinks the cache.
    largest_cached_size_ =
        size_t(base::features::GetPartitionAllocLargeThreadCacheSizeValue());

#if BUILDFLAG(IS_ANDROID)
    // Use appropriately lower amount for Android devices with 3GB or less.
    // Devices almost always report less physical memory than what they actually
    // have, so use 3.2GB (a threshold commonly uses throughout code) to avoid
    // accidentally catching devices advertised as 4GB.
    if (base::SysInfo::AmountOfPhysicalMemoryMB() < 3.2 * 1024) {
      largest_cached_size_ = size_t(
          base::features::
              GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid());
    }
#endif  // BUILDFLAG(IS_ANDROID)

    ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
        // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if BUILDFLAG(USE_STARSCAN)
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocPCScanMUAwareScheduler)) {
    // Assign PCScan a task-based scheduling backend.
    static base::NoDestructor<
        partition_alloc::internal::MUAwareTaskBasedBackend>
        mu_aware_task_based_backend{
            partition_alloc::internal::PCScan::scheduler(),
            &partition_alloc::internal::PCScan::PerformDelayedScan};
    partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
        *mu_aware_task_based_backend.get());
  }
#endif  // BUILDFLAG(USE_STARSCAN)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Periodic reclaim runs on the current (main) thread's task runner.
  base::allocator::StartMemoryReclaimer(
      base::SingleThreadTaskRunner::GetCurrentDefault());
#endif

  // Free-list tuning: these are static, process-wide settings.
  partition_alloc::PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocStraightenLargerSlotSpanFreeLists)
          ? features::kPartitionAllocStraightenLargerSlotSpanFreeListsMode.Get()
          : partition_alloc::StraightenLargerSlotSpanFreeListsMode::kNever);
  partition_alloc::PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocSortSmallerSlotSpanFreeLists));
  partition_alloc::PartitionRoot::SetSortActiveSlotSpansEnabled(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocSortActiveSlotSpans));
}
1383
// Called when the process is foregrounded. Renderer-only: restores the larger
// thread-cache limit (largest_cached_size_, chosen in
// ReconfigureAfterTaskRunnerInit()), unless
// kLowerPAMemoryLimitForNonMainRenderers applies and this renderer hosts no
// main frame.
void PartitionAllocSupport::OnForegrounded(bool has_main_frame) {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  {
    base::AutoLock scoped_lock(lock_);
    if (established_process_type_ != switches::kRendererProcess) {
      return;
    }
  }

  if (!base::FeatureList::IsEnabled(
          features::kLowerPAMemoryLimitForNonMainRenderers) ||
      has_main_frame) {
    ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
        // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
1402
// Called when the process is backgrounded. Renderer-only: shrinks the
// thread-cache limit back to the default and schedules a one-off memory
// reclaim shortly afterwards.
void PartitionAllocSupport::OnBackgrounded() {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  {
    base::AutoLock scoped_lock(lock_);
    if (established_process_type_ != switches::kRendererProcess) {
      return;
    }
  }

  // Performance matters less for background renderers, don't pay the memory
  // cost.
  ::partition_alloc::ThreadCache::SetLargestCachedSize(
      ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold);

  // In renderers, memory reclaim uses the "idle time" task runner to run
  // periodic reclaim. This does not always run when the renderer is idle, and
  // in particular after the renderer gets backgrounded. As a result, empty slot
  // spans are potentially never decommitted. To mitigate that, run a one-off
  // reclaim a few seconds later. Even if the renderer comes back to foreground
  // in the meantime, the worst case is a few more system calls.
  //
  // TODO(lizeb): Remove once/if the behavior of idle tasks changes.
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
      FROM_HERE, base::BindOnce([]() {
        ::partition_alloc::MemoryReclaimer::Instance()->ReclaimAll();
      }),
      base::Seconds(10));

#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
        // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
1435
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// Test-only wrapper around ExtractDanglingPtrSignature(), exposing it so unit
// tests can validate signature extraction from a stack trace string.
std::string PartitionAllocSupport::ExtractDanglingPtrSignatureForTests(
    std::string stacktrace) {
  return ExtractDanglingPtrSignature(stacktrace);
}
#endif
1442
1443 } // namespace base::allocator
1444