// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_alloc_support.h"

#include <array>
#include <cinttypes>
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <string_view>

#include "base/allocator/partition_alloc_features.h"
#include "base/at_exit.h"
#include "base/check.h"
#include "base/containers/span.h"
#include "base/cpu.h"
#include "base/debug/dump_without_crashing.h"
#include "base/debug/stack_trace.h"
#include "base/debug/task_trace.h"
#include "base/feature_list.h"
#include "base/functional/bind.h"
#include "base/functional/callback.h"
#include "base/immediate_crash.h"
#include "base/location.h"
#include "base/memory/post_delayed_memory_reduction_task.h"
#include "base/memory/raw_ptr_asan_service.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/pending_task.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/task/single_thread_task_runner.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "partition_alloc/allocation_guard.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/pointers/instance_tracer.h"
#include "partition_alloc/pointers/raw_ptr.h"
#include "partition_alloc/shim/allocator_shim.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/thread_cache.h"

#if PA_BUILDFLAG( \
    ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.h"
#endif

#if BUILDFLAG(IS_ANDROID) && PA_BUILDFLAG(HAS_MEMORY_TAGGING)
#include <sys/system_properties.h>
#endif

namespace base::allocator {

namespace {

#if BUILDFLAG(IS_ANDROID) && PA_BUILDFLAG(HAS_MEMORY_TAGGING)
enum class BootloaderOverride {
  kDefault,
  kForceOn,
  kForceOff,
};

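// Reads the Android system property that can force MTE on or off at the
// bootloader level. An empty or unrecognized value maps to kDefault.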
BootloaderOverride GetBootloaderOverride() {
  char bootloader_override_str[PROP_VALUE_MAX];
  __system_property_get(
      "persist.device_config.runtime_native_boot.bootloader_override",
      bootloader_override_str);

  if (strcmp(bootloader_override_str, "force_on") == 0) {
    return BootloaderOverride::kForceOn;
  }
  if (strcmp(bootloader_override_str, "force_off") == 0) {
    return BootloaderOverride::kForceOff;
  }
  return BootloaderOverride::kDefault;
}
#endif

// When under this experiment, avoid running periodic purging or reclaim for
// the first minute after the first attempt. This is based on the insight that
// processes often don't live past this minute.
static BASE_FEATURE(kDelayFirstPeriodicPAPurgeOrReclaim,
                    "DelayFirstPeriodicPAPurgeOrReclaim",
                    base::FEATURE_ENABLED_BY_DEFAULT);
constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay = base::Minutes(1);

// These are defined in content/public/common/content_switches.h, which is not
// accessible from ::base. The values must be kept in sync.
namespace switches {
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
constexpr char kZygoteProcess[] = "zygote";
}  // namespace switches

}  // namespace

namespace {

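// Purges all ThreadCache instances once, then reschedules itself with the
// next interval recommended by ThreadCacheRegistry.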
void RunThreadCachePeriodicPurge() {
  // Micros, since periodic purge should typically take at most a few ms.
  SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
  TRACE_EVENT0("memory", "PeriodicPurge");
  auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
  instance.RunPeriodicPurge();
  TimeDelta delay =
      Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
  SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
      FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}

}  // namespace

// When enabled, disable the memory reclaimer in the background.
BASE_FEATURE(kDisableMemoryReclaimerInBackground,
             "DisableMemoryReclaimerInBackground",
             base::FEATURE_ENABLED_BY_DEFAULT);

// When enabled, limit the time the memory reclaimer may take, returning early
// when exceeded.
BASE_FEATURE(kPartitionAllocShortMemoryReclaim,
             "PartitionAllocShortMemoryReclaim",
             base::FEATURE_ENABLED_BY_DEFAULT);

// static
MemoryReclaimerSupport& MemoryReclaimerSupport::Instance() {
  static base::NoDestructor<MemoryReclaimerSupport> instance;
  return *instance.get();
}

MemoryReclaimerSupport::~MemoryReclaimerSupport() = default;

MemoryReclaimerSupport::MemoryReclaimerSupport() = default;

void MemoryReclaimerSupport::Start(scoped_refptr<TaskRunner> task_runner) {
  if (!base::FeatureList::IsEnabled(
          base::features::kPartitionAllocMemoryReclaimer)) {
    return;
  }

  // Can be called several times.
  if (has_pending_task_) {
    return;
  }

  // The caller of the API fully controls where the reclaim runs. However,
  // there are a few reasons to recommend that the caller run it on the main
  // thread:
  // - Most of PartitionAlloc's usage is on the main thread, hence PA's
  //   metadata is more likely in cache when executing on the main thread.
  // - Memory reclaim takes the partition lock for each partition. As a
  //   consequence, while reclaim is running, the main thread is unlikely to be
  //   able to make progress, as it would be waiting on the lock.
  // - Finally, this runs in idle time only, so there should be no visible
  //   impact.
  //
  // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
  // seconds is useful. Since this is meant to run during idle time only, it is
  // a reasonable starting point balancing effectiveness vs. cost. See
  // crbug.com/942512 for details and experimental results.
  TimeDelta delay;
  if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
    delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
  }

  task_runner_ = task_runner;
  MaybeScheduleTask(delay);
}

void MemoryReclaimerSupport::SetForegrounded(bool in_foreground) {
  in_foreground_ = in_foreground;
  if (in_foreground_) {
    MaybeScheduleTask();
  }
}

void MemoryReclaimerSupport::ResetForTesting() {
  task_runner_ = nullptr;
  has_pending_task_ = false;
  in_foreground_ = true;
}

void MemoryReclaimerSupport::Run() {
  TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
  has_pending_task_ = false;

  {
    // Micros, since memory reclaiming should typically take at most a few ms.
    SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
    if (base::FeatureList::IsEnabled(kPartitionAllocShortMemoryReclaim)) {
      ::partition_alloc::MemoryReclaimer::Instance()->ReclaimFast();
    } else {
      ::partition_alloc::MemoryReclaimer::Instance()->ReclaimNormal();
    }
  }

  MaybeScheduleTask();
}

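// Returns the delay between reclaim tasks: the feature param's value if it is
// positive, otherwise MemoryReclaimer's own recommended interval.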
// static
TimeDelta MemoryReclaimerSupport::GetInterval() {
  TimeDelta delay = features::kPartitionAllocMemoryReclaimerInterval.Get();
  if (delay.is_positive()) {
    return delay;
  }

  return Microseconds(::partition_alloc::MemoryReclaimer::Instance()
                          ->GetRecommendedReclaimIntervalInMicroseconds());
}

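// Schedules a single reclaim task, unless one is already pending, the process
// is backgrounded while kDisableMemoryReclaimerInBackground is enabled, or
// Start() has not provided a task runner yet.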
void MemoryReclaimerSupport::MaybeScheduleTask(TimeDelta delay) {
  if (has_pending_task_ ||
      (base::FeatureList::IsEnabled(kDisableMemoryReclaimerInBackground) &&
       !in_foreground_) ||
      !task_runner_) {
    return;
  }

  has_pending_task_ = true;
  TimeDelta actual_delay = std::max(delay, GetInterval());
  task_runner_->PostDelayedTask(
      FROM_HERE, BindOnce(&MemoryReclaimerSupport::Run, base::Unretained(this)),
      actual_delay);
}

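// Posts the first thread cache purge task on the current thread's default
// task runner, optionally delayed by kFirstPAPurgeOrReclaimDelay.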
void StartThreadCachePeriodicPurge() {
  auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
  TimeDelta delay =
      Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());

  if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
    delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
  }

  SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
      FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}

void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
  MemoryReclaimerSupport::Instance().Start(task_runner);
}

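// Builds the key/value pairs reported as synthetic field trials, e.g.
// {"DanglingPointerDetector", "Enabled"}, so metrics and crash reports can be
// sliced by the allocator configuration that was actually in effect.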
std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
  std::map<std::string, std::string> trials;

#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  trials.emplace("DanglingPointerDetector", "Enabled");
#else
  trials.emplace("DanglingPointerDetector", "Disabled");
#endif

  // This value is not surrounded by build flags as it is meant to be updated
  // manually in binary experiment patches.
  trials.emplace("VectorRawPtrExperiment", "Disabled");

#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocMemoryTagging)) {
    bool has_mte = base::CPU::GetInstanceNoAllocation().has_mte();
    if (has_mte) {
      trials.emplace("MemoryTaggingDogfood", "Enabled");
    } else {
      trials.emplace("MemoryTaggingDogfood", "Disabled");
    }
#if BUILDFLAG(IS_ANDROID)
    BootloaderOverride bootloader_override = GetBootloaderOverride();
    partition_alloc::TagViolationReportingMode reporting_mode =
        partition_alloc::TagViolationReportingMode::kUndefined;
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    reporting_mode = allocator_shim::internal::PartitionAllocMalloc::Allocator()
                         ->memory_tagging_reporting_mode();
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    switch (bootloader_override) {
      case BootloaderOverride::kDefault:
        trials.emplace("MemoryTaggingBootloaderOverride", "Default");
        break;
      case BootloaderOverride::kForceOn:
        if (has_mte) {
          switch (reporting_mode) {
            case partition_alloc::TagViolationReportingMode::kAsynchronous:
              trials.emplace("MemoryTaggingBootloaderOverride", "ForceOnAsync");
              break;
            case partition_alloc::TagViolationReportingMode::kSynchronous:
              // This should not happen unless the user forces it.
              trials.emplace("MemoryTaggingBootloaderOverride", "ForceOnSync");
              break;
            default:
              // This should not happen unless the user forces it.
              trials.emplace("MemoryTaggingBootloaderOverride",
                             "ForceOnDisabled");
          }
        } else {
          // This should not happen unless the user forces it.
          trials.emplace("MemoryTaggingBootloaderOverride",
                         "ForceOnWithoutMte");
        }
        break;
      case BootloaderOverride::kForceOff:
        if (!has_mte) {
          trials.emplace("MemoryTaggingBootloaderOverride", "ForceOff");
        } else {
          // This should not happen unless the user forces it.
          trials.emplace("MemoryTaggingBootloaderOverride", "ForceOffWithMte");
        }
        break;
    }
#endif  // BUILDFLAG(IS_ANDROID)
  }
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)

  return trials;
}

namespace {

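// Maps a PAFeatureEnabledProcesses param value to a decision for the current
// process. An empty |process_type| denotes the browser process.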
bool ShouldEnableFeatureOnProcess(
    features::internal::PAFeatureEnabledProcesses enabled_processes,
    const std::string& process_type) {
  switch (enabled_processes) {
    case features::internal::PAFeatureEnabledProcesses::kBrowserOnly:
      return process_type.empty();
    case features::internal::PAFeatureEnabledProcesses::kNonRenderer:
      return process_type != switches::kRendererProcess;
    case features::internal::PAFeatureEnabledProcesses::kBrowserAndRenderer:
      return process_type.empty() || process_type == switches::kRendererProcess;
    case features::internal::PAFeatureEnabledProcesses::kRendererOnly:
      return process_type == switches::kRendererProcess;
    case features::internal::PAFeatureEnabledProcesses::kAllChildProcesses:
      return !process_type.empty() && process_type != switches::kZygoteProcess;
    case features::internal::PAFeatureEnabledProcesses::kAllProcesses:
      return true;
  }
}

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
bool ShouldEnableShadowMetadata(const std::string& process_type) {
  if (!base::FeatureList::IsEnabled(
          base::features::kPartitionAllocShadowMetadata)) {
    return false;
  }
  return ShouldEnableFeatureOnProcess(
      features::kShadowMetadataEnabledProcessesParam.Get(), process_type);
}
#endif  // PA_CONFIG(ENABLE_SHADOW_METADATA)

}  // namespace

#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

namespace {

internal::PartitionLock g_stack_trace_buffer_lock;

constexpr size_t kDanglingPtrStackTraceSize =
    PA_BUILDFLAG(IS_DEBUG)
        ? 32  // Symbolizing large stack traces can be expensive in debug
              // builds. We prefer displaying a reasonably sized one instead
              // of timing out.
        : base::debug::StackTrace::kMaxTraces;

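// Free-time information recorded for one dangling pointer: where the memory
// was freed (stack trace and task trace) plus the id used to match the later
// raw_ptr release. The buffer holds at most 32 entries; further detections
// are silently dropped (see DanglingRawPtrDetected below).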
struct DanglingPointerFreeInfo {
  debug::StackTrace stack_trace;
  debug::TaskTrace task_trace;
  uintptr_t id = 0;
};
using DanglingRawPtrBuffer =
    std::array<std::optional<DanglingPointerFreeInfo>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);

void DanglingRawPtrDetected(uintptr_t id) {
  // This is called from inside the allocator. No allocation is allowed.

  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);

#if DCHECK_IS_ON()
  for (std::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
    PA_DCHECK(!entry || entry->id != id);
  }
#endif  // DCHECK_IS_ON()

  for (std::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
    if (!entry) {
      entry = {
          debug::StackTrace(kDanglingPtrStackTraceSize),
          debug::TaskTrace(),
          id,
      };
      return;
    }
  }

  // The StackTrace hasn't been recorded, because the buffer isn't large
  // enough.
}

// From the traces recorded in |DanglingRawPtrDetected|, extract the one whose
// id matches |id|. Returns std::nullopt if not found.
std::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo(
    uintptr_t id) {
  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
  for (std::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
    if (entry && entry->id == id) {
      std::optional<DanglingPointerFreeInfo> result(entry);
      entry = std::nullopt;
      return result;
    }
  }
  return std::nullopt;
}

// Extracts, from the StackTrace output, the signature of the pertinent caller.
// This function is meant to be used only by Chromium developers, to list all
// the dangling raw_ptr occurrences in a table.
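//
// As a (hypothetical) illustration, given a Posix-style trace containing:
//   #1 0x0000abcd base::internal::RawPtrBackupRefImpl<...>::ReleaseInternal()
//   #2 0x0000abce content::SomeCaller::DoStuff()
// the line after the last matching callee pattern is selected, and the
// signature "content::SomeCaller::DoStuff()" is returned.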
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
  std::vector<std::string_view> lines = SplitStringPiece(
      stacktrace, "\r\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);

  // We are looking for the callers of the function releasing the raw_ptr and
  // freeing memory. This lists potential matching patterns. A pattern is a
  // list of substrings that are all required to match.
  const std::vector<std::string_view> callee_patterns[] = {
      // Common signature patterns:
      {"internal::PartitionFree"},
      {"base::", "::FreeFn"},
      {"internal::RawPtrBackupRefImpl", "::ReleaseInternal"},

      // Linux specific:
      {"base::RefCountedThreadSafe<>::Release"},

      // Windows specific:
      {"_free_base"},

      // Task traces are prefixed with "Task trace:" in
      // |TaskTrace::OutputToStream|.
      {"Task trace:"},
  };
  size_t caller_index = 0;
  for (size_t i = 0; i < lines.size(); ++i) {
    for (const auto& patterns : callee_patterns) {
      if (ranges::all_of(patterns, [&](std::string_view pattern) {
            return lines[i].find(pattern) != std::string_view::npos;
          })) {
        caller_index = i + 1;
      }
    }
  }
  if (caller_index >= lines.size()) {
    return "no_callee_match";
  }
  std::string_view caller = lines[caller_index];

  if (caller.empty()) {
    return "invalid_format";
  }

  // On Posix platforms, |caller| follows this format:
  //
  //   #<index> <address> <symbol>
  //
  // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
  if (caller[0] == '#') {
    const size_t address_start = caller.find(' ');
    const size_t function_start = caller.find(' ', address_start + 1);

    if (address_start == caller.npos || function_start == caller.npos) {
      return "invalid_format";
    }

    return std::string(caller.substr(function_start + 1));
  }

  // On Windows platforms, |caller| follows this format:
  //
  //   \t<symbol> [0x<address>]+<displacement>(<filename>:<line>)
  //
  // See https://crsrc.org/c/base/debug/stack_trace_win.cc
  if (caller[0] == '\t') {
    const size_t symbol_start = 1;
    const size_t symbol_end = caller.find(' ');
    if (symbol_end == caller.npos) {
      return "invalid_format";
    }
    return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
  }

  // On Mac platforms, |caller| follows this format:
  //
  //   <index> <library> 0x<address> <symbol> + <line>
  //
  // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
  if (caller[0] >= '0' && caller[0] <= '9') {
    const size_t address_start = caller.find("0x");
    const size_t symbol_start = caller.find(' ', address_start + 1) + 1;
    const size_t symbol_end = caller.find(' ', symbol_start);
    if (symbol_start == caller.npos || symbol_end == caller.npos) {
      return "invalid_format";
    }
    return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
  }

  return "invalid_format";
}

std::string ExtractDanglingPtrSignature(debug::TaskTrace task_trace) {
  if (task_trace.empty()) {
    return "No active task";
  }
  return ExtractDanglingPtrSignature(task_trace.ToString());
}

std::string ExtractDanglingPtrSignature(
    std::optional<DanglingPointerFreeInfo> free_info,
    debug::StackTrace release_stack_trace,
    debug::TaskTrace release_task_trace) {
  if (free_info) {
    return StringPrintf(
        "[DanglingSignature]\t%s\t%s\t%s\t%s",
        ExtractDanglingPtrSignature(free_info->stack_trace.ToString()).c_str(),
        ExtractDanglingPtrSignature(free_info->task_trace).c_str(),
        ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
        ExtractDanglingPtrSignature(release_task_trace).c_str());
  }
  return StringPrintf(
      "[DanglingSignature]\t%s\t%s\t%s\t%s", "missing", "missing",
      ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
      ExtractDanglingPtrSignature(release_task_trace).c_str());
}

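// Two task traces are considered equal when they hold the same return
// addresses; no symbolization is needed for the comparison.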
bool operator==(const debug::TaskTrace& lhs, const debug::TaskTrace& rhs) {
  // Compare the addresses contained in the task traces.
  // The task traces are at most |PendingTask::kTaskBacktraceLength| long.
  std::array<const void*, PendingTask::kTaskBacktraceLength> addresses_lhs = {};
  std::array<const void*, PendingTask::kTaskBacktraceLength> addresses_rhs = {};
  lhs.GetAddresses(addresses_lhs);
  rhs.GetAddresses(addresses_rhs);
  return addresses_lhs == addresses_rhs;
}

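// Reports the release of a dangling raw_ptr. Instantiated once per
// (mode, type) combination so the feature params are baked in at compile
// time; see the dispatch in InstallDanglingRawPtrChecks() below.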
template <features::DanglingPtrMode dangling_pointer_mode,
          features::DanglingPtrType dangling_pointer_type>
void DanglingRawPtrReleased(uintptr_t id) {
  // This is called from raw_ptr<>'s release operation. Making allocations is
  // allowed. In particular, symbolizing and printing the StackTraces may
  // allocate memory.

  debug::StackTrace stack_trace_release(kDanglingPtrStackTraceSize);
  debug::TaskTrace task_trace_release;
  std::optional<DanglingPointerFreeInfo> free_info =
      TakeDanglingPointerFreeInfo(id);

  if constexpr (dangling_pointer_type ==
                features::DanglingPtrType::kCrossTask) {
    if (!free_info) {
      return;
    }
    if (task_trace_release == free_info->task_trace) {
      return;
    }
  }

  std::string dangling_signature = ExtractDanglingPtrSignature(
      free_info, stack_trace_release, task_trace_release);

  {
    // Log the full error in a single LogMessage. Printing StackTrace is
    // expensive, so we want to avoid interleaving the output with other logs.
    logging::LogMessage log_message(__FILE__, __LINE__, logging::LOGGING_ERROR);
    std::ostream& error = log_message.stream();

    // The dangling signature can be used by scripts to locate the origin of
    // every dangling pointer.
    error << "\n\n" << dangling_signature << "\n\n";

    error << "[DanglingPtr](1/3) A raw_ptr/raw_ref is dangling.\n\n";

    auto print_traces = [](debug::StackTrace stack_trace,
                           debug::TaskTrace task_trace, std::ostream& error) {
      error << "Stack trace:\n";
      error << stack_trace << "\n";

      // Printing "Task trace:" is implied by the TaskTrace itself.
      if (!task_trace.empty()) {
        error << task_trace << "\n";
      }
    };

    error << "[DanglingPtr](2/3) ";
    if (free_info) {
      error << "First, the memory was freed at:\n\n";
      print_traces(free_info->stack_trace, free_info->task_trace, error);
    } else {
      error << "It was not recorded where the memory was freed.\n";
    }

    error << "[DanglingPtr](3/3) Later, the dangling raw_ptr was released "
             "at:\n\n";
    print_traces(stack_trace_release, task_trace_release, error);

    error << "Please check for more information on:\n";
    error << "https://chromium.googlesource.com/chromium/src/+/main/docs/";
    error << "dangling_ptr_guide.md\n";
    error << "\n";
  }

  if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
    // We use `PA_IMMEDIATE_CRASH()` instead of base's ImmediateCrash() to
    // avoid printing the raw_ptr release stack trace twice.
    PA_IMMEDIATE_CRASH();
  }
}

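// CHECKs at process exit (or test end) when freed allocations are still
// referenced by a raw_ptr/raw_ref. On Android the buffer is only cleared;
// see the TODO below about leaked refcounts.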
void CheckDanglingRawPtrBufferEmpty() {
  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);

  // TODO(crbug.com/40260713): Check for leaked refcount on Android.
#if BUILDFLAG(IS_ANDROID)
  g_stack_trace_buffer = DanglingRawPtrBuffer();
#else
  bool errors = false;
  for (auto entry : g_stack_trace_buffer) {
    if (!entry) {
      continue;
    }
    errors = true;
    LOG(ERROR) << "A freed allocation is still referenced by a dangling "
                  "pointer at exit, or at test end. Leaked raw_ptr/raw_ref "
                  "could cause PartitionAlloc's quarantine memory bloat."
                  "\n\n"
                  "Memory was released on:\n"
               << entry->task_trace << "\n"
               << entry->stack_trace << "\n";
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER)
    auto is_frame_ptr_not_null = [](const void* frame_ptr) {
      return frame_ptr != nullptr;
    };
    std::vector<std::array<const void*, 32>> stack_traces =
        internal::InstanceTracer::GetStackTracesForDanglingRefs(entry->id);
    for (const auto& raw_stack_trace : stack_traces) {
      CHECK(ranges::is_partitioned(raw_stack_trace, is_frame_ptr_not_null))
          << "`raw_stack_trace` is expected to be partitioned: non-null values "
             "at the beginning followed by `nullptr`s.";
      LOG(ERROR) << "Dangling reference from:\n";
      LOG(ERROR) << debug::StackTrace(
                        // This call truncates the `nullptr` tail of the stack
                        // trace (see the `is_partitioned` CHECK above).
                        span(raw_stack_trace.begin(),
                             ranges::partition_point(raw_stack_trace,
                                                     is_frame_ptr_not_null)))
                 << "\n";
    }
#else
    LOG(ERROR) << "Building with enable_backup_ref_ptr_instance_tracer will "
                  "print out stack traces of any live but dangling references.";
#endif
  }
  CHECK(!errors);
#endif
}

}  // namespace

void InstallDanglingRawPtrChecks() {
  // Multiple tests can run within the same executable's execution. This line
  // ensures problems detected in the previous test cause an error before
  // entering the next one...
  CheckDanglingRawPtrBufferEmpty();

  // ... similarly, some allocations may stay forever in the quarantine, and
  // we might ignore them when the executable exits. This line makes sure
  // dangling pointer errors are never ignored, by crashing at exit, as a last
  // resort. This makes quarantine memory bloat more likely to be detected.
  static bool first_run_in_process = true;
  if (first_run_in_process) {
    first_run_in_process = false;
    AtExitManager::RegisterTask(base::BindOnce(CheckDanglingRawPtrBufferEmpty));
  }

  if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
    partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
    partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
    return;
  }

  partition_alloc::SetDanglingRawPtrDetectedFn(&DanglingRawPtrDetected);
  switch (features::kDanglingPtrModeParam.Get()) {
    case features::DanglingPtrMode::kCrash:
      switch (features::kDanglingPtrTypeParam.Get()) {
        case features::DanglingPtrType::kAll:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
                                      features::DanglingPtrType::kAll>);
          break;
        case features::DanglingPtrType::kCrossTask:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
                                      features::DanglingPtrType::kCrossTask>);
          break;
      }
      break;
    case features::DanglingPtrMode::kLogOnly:
      switch (features::kDanglingPtrTypeParam.Get()) {
        case features::DanglingPtrType::kAll:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
                                      features::DanglingPtrType::kAll>);
          break;
        case features::DanglingPtrType::kCrossTask:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
                                      features::DanglingPtrType::kCrossTask>);
          break;
      }
      break;
  }
}

// TODO(arthursonzogni): There might exist long-lived dangling raw_ptrs. If
// there is a dangling pointer, we should crash at some point. Consider
// providing an API to periodically check the buffer.

#else   // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
void InstallDanglingRawPtrChecks() {}
#endif  // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

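// PA_NO_CODE_FOLDING() gives this function a distinct code address, so that
// identical code folding cannot merge it with another reporting function and
// muddle the crash dump signatures.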
void UnretainedDanglingRawPtrDetectedDumpWithoutCrashing(uintptr_t id) {
  PA_NO_CODE_FOLDING();
  debug::DumpWithoutCrashing();
}

void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
  static const char unretained_dangling_ptr_footer[] =
      "\n"
      "\n"
      "Please check for more information on:\n"
      "https://chromium.googlesource.com/chromium/src/+/main/docs/"
      "unretained_dangling_ptr_guide.md\n";
  debug::TaskTrace task_trace;
  debug::StackTrace stack_trace;
  LOG(FATAL) << "Detected dangling raw_ptr in unretained with id="
             << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
             << task_trace << '\n'
             << "Stack trace:\n"
             << stack_trace << unretained_dangling_ptr_footer;
}

void InstallUnretainedDanglingRawPtrChecks() {
  if (!FeatureList::IsEnabled(features::kPartitionAllocUnretainedDanglingPtr)) {
    partition_alloc::SetUnretainedDanglingRawPtrDetectedFn([](uintptr_t) {});
    partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/false);
    return;
  }

  partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/true);
  switch (features::kUnretainedDanglingPtrModeParam.Get()) {
    case features::UnretainedDanglingPtrMode::kCrash:
      partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
          &UnretainedDanglingRawPtrDetectedCrash);
      break;

    case features::UnretainedDanglingPtrMode::kDumpWithoutCrashing:
      partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
          &UnretainedDanglingRawPtrDetectedDumpWithoutCrashing);
      break;
  }
}

void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
  DCHECK_NE(process_type, switches::kZygoteProcess);
  // TODO(keishi): Move the code to enable BRP back here after Finch
  // experiments.
}

void MakeFreeNoOp() {
  // Ignoring `free()` during Shutdown would allow developers to introduce new
  // dangling pointers. So we want to avoid ignoring `free()` when the
  // DanglingPointerDetector is enabled. Note: for now, it is only enabled on
  // 5 bots, and on Linux non-official configurations.
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  CHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
    return;
  }
#endif  // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
  allocator_shim::InsertNoOpOnFreeAllocatorShimOnShutDown();
#endif  // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
}

PartitionAllocSupport* PartitionAllocSupport::Get() {
  static auto* singleton = new PartitionAllocSupport();
  return singleton;
}

PartitionAllocSupport::PartitionAllocSupport() = default;

void PartitionAllocSupport::ReconfigureForTests() {
  ReconfigureEarlyish("");
  base::AutoLock scoped_lock(lock_);
  called_for_tests_ = true;
}

// static
bool PartitionAllocSupport::ShouldEnableMemoryTagging(
    const std::string& process_type) {
  // Check kPartitionAllocMemoryTagging first so the Feature is activated even
  // when the MTE bootloader flag is disabled.
  if (!base::FeatureList::IsEnabled(
          base::features::kPartitionAllocMemoryTagging)) {
    return false;
  }
  if (!base::CPU::GetInstanceNoAllocation().has_mte()) {
    return false;
  }

  DCHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(
          base::features::kKillPartitionAllocMemoryTagging)) {
    return false;
  }
  return ShouldEnableFeatureOnProcess(
      base::features::kMemoryTaggingEnabledProcessesParam.Get(), process_type);
}

// static
bool PartitionAllocSupport::ShouldEnableMemoryTaggingInRendererProcess() {
  return ShouldEnableMemoryTagging(switches::kRendererProcess);
}

// static
bool PartitionAllocSupport::ShouldEnablePartitionAllocWithAdvancedChecks(
    const std::string& process_type) {
#if !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return false;
#else
  if (!base::FeatureList::IsEnabled(
          base::features::kPartitionAllocWithAdvancedChecks)) {
    return false;
  }
  return ShouldEnableFeatureOnProcess(
      base::features::kPartitionAllocWithAdvancedChecksEnabledProcessesParam
          .Get(),
      process_type);
#endif  // !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

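// Computes whether BackupRefPtr should be enabled for this process, and
// whether the process is in the BRP experiment population; the latter also
// drives the RawPtrAsanService configuration in ASan builds (see
// ReconfigureAfterFeatureListInit()).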
// static
PartitionAllocSupport::BrpConfiguration
PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
  // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
  CHECK(base::FeatureList::GetInstance());

  bool process_affected_by_brp_flag = false;
#if (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&          \
     PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&          \
     !PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)) || \
    PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocBackupRefPtr)) {
    // No specified process type means this is the Browser process.
    process_affected_by_brp_flag = ShouldEnableFeatureOnProcess(
        base::features::kBackupRefPtrEnabledProcessesParam.Get(), process_type);
  }
#endif  // (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        //  PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
        //  !PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)) ||
        // PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)

  const bool enable_brp =
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
      // kDisabled is equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
      process_affected_by_brp_flag &&
      base::features::kBackupRefPtrModeParam.Get() !=
          base::features::BackupRefPtrMode::kDisabled;
#else
      false;
#endif

  return {
      enable_brp,
      process_affected_by_brp_flag,
  };
}

void PartitionAllocSupport::ReconfigureEarlyish(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);

    // In tests, ReconfigureEarlyish() is called by ReconfigureForTests(),
    // which runs earlier than ContentMain().
    if (called_for_tests_) {
      DCHECK(called_earlyish_);
      return;
    }

    // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
    CHECK(!called_earlyish_)
        << "ReconfigureEarlyish was already called for process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_earlyish_ = true;
    established_process_type_ = process_type;
  }

  if (process_type != switches::kZygoteProcess) {
    ReconfigurePartitionForKnownProcess(process_type);
  }

  // These initializations are only relevant for PartitionAlloc-Everywhere
  // builds.
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  allocator_shim::EnablePartitionAllocMemoryReclaimer();
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

void PartitionAllocSupport::ReconfigureAfterZygoteFork(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);
    // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
    CHECK(!called_after_zygote_fork_)
        << "ReconfigureAfterZygoteFork was already called for process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";
    DCHECK(called_earlyish_)
        << "Attempt to call ReconfigureAfterZygoteFork without calling "
           "ReconfigureEarlyish; current process: '"
        << process_type << "'";
    DCHECK_EQ(established_process_type_, switches::kZygoteProcess)
        << "Attempt to call ReconfigureAfterZygoteFork while "
           "ReconfigureEarlyish was called on non-zygote process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_after_zygote_fork_ = true;
    established_process_type_ = process_type;
  }

  if (process_type != switches::kZygoteProcess) {
    ReconfigurePartitionForKnownProcess(process_type);
  }
}

void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
    const std::string& process_type,
    bool configure_dangling_pointer_detector) {
  if (configure_dangling_pointer_detector) {
    base::allocator::InstallDanglingRawPtrChecks();
  }
  base::allocator::InstallUnretainedDanglingRawPtrChecks();
  {
    base::AutoLock scoped_lock(lock_);
    // Avoid initializing more than once.
    if (called_after_feature_list_init_) {
      DCHECK_EQ(established_process_type_, process_type)
          << "ReconfigureAfterFeatureListInit was already called for process '"
          << established_process_type_ << "'; current process: '"
          << process_type << "'";
      return;
    }
    DCHECK(called_earlyish_)
        << "Attempt to call ReconfigureAfterFeatureListInit without calling "
           "ReconfigureEarlyish; current process: '"
        << process_type << "'";
    DCHECK_NE(established_process_type_, switches::kZygoteProcess)
        << "Attempt to call ReconfigureAfterFeatureListInit without calling "
           "ReconfigureAfterZygoteFork; current process: '"
        << process_type << "'";
    DCHECK_EQ(established_process_type_, process_type)
        << "ReconfigureAfterFeatureListInit wasn't called for an already "
           "established process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_after_feature_list_init_ = true;
  }

  DCHECK_NE(process_type, switches::kZygoteProcess);
  [[maybe_unused]] BrpConfiguration brp_config =
      GetBrpConfiguration(process_type);

#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
  if (brp_config.process_affected_by_brp_flag) {
    base::RawPtrAsanService::GetInstance().Configure(
        base::EnableDereferenceCheck(
            base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
        base::EnableExtractionCheck(
            base::features::kBackupRefPtrAsanEnableExtractionCheckParam.Get()),
        base::EnableInstantiationCheck(
            base::features::kBackupRefPtrAsanEnableInstantiationCheckParam
                .Get()));
  } else {
    base::RawPtrAsanService::GetInstance().Configure(
        base::EnableDereferenceCheck(false), base::EnableExtractionCheck(false),
        base::EnableInstantiationCheck(false));
  }
#endif  // PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  auto bucket_distribution = allocator_shim::BucketDistribution::kNeutral;
  // No specified type means we are in the browser.
  switch (process_type == ""
              ? base::features::kPartitionAllocBucketDistributionParam.Get()
              : base::features::BucketDistributionMode::kDefault) {
    case base::features::BucketDistributionMode::kDefault:
      break;
    case base::features::BucketDistributionMode::kDenser:
      bucket_distribution = allocator_shim::BucketDistribution::kDenser;
      break;
  }

  const bool scheduler_loop_quarantine = base::FeatureList::IsEnabled(
      base::features::kPartitionAllocSchedulerLoopQuarantine);
  const size_t scheduler_loop_quarantine_branch_capacity_in_bytes =
      static_cast<size_t>(
          base::features::kPartitionAllocSchedulerLoopQuarantineBranchCapacity
              .Get());
  const bool zapping_by_free_flags = base::FeatureList::IsEnabled(
      base::features::kPartitionAllocZappingByFreeFlags);
  const bool eventually_zero_freed_memory = base::FeatureList::IsEnabled(
      base::features::kPartitionAllocEventuallyZeroFreedMemory);

#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
  const bool use_pool_offset_freelists =
      base::FeatureList::IsEnabled(base::features::kUsePoolOffsetFreelists);
#else
  const bool use_pool_offset_freelists = false;
#endif  // PA_BUILDFLAG(USE_FREELIST_DISPATCHER)

  bool enable_memory_tagging = false;
  partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode =
      partition_alloc::TagViolationReportingMode::kUndefined;

#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  // ShouldEnableMemoryTagging() checks kKillPartitionAllocMemoryTagging, but
  // check here too to gate the GetMemoryTaggingModeForCurrentThread() call.
  if (!base::FeatureList::IsEnabled(
          base::features::kKillPartitionAllocMemoryTagging)) {
    // If synchronous mode is enabled from startup, it means this is a test or
    // it was somehow force-enabled in Chrome, so honor that choice.
    partition_alloc::TagViolationReportingMode
        startup_memory_tagging_reporting_mode =
            partition_alloc::internal::GetMemoryTaggingModeForCurrentThread();
    if (startup_memory_tagging_reporting_mode ==
        partition_alloc::TagViolationReportingMode::kSynchronous) {
      enable_memory_tagging = true;
      memory_tagging_reporting_mode =
          partition_alloc::TagViolationReportingMode::kSynchronous;
      // Not enabling permissive mode as this config is used to crash and
      // detect bugs.
      VLOG(1) << "PartitionAlloc: Memory tagging enabled in SYNC mode at "
                 "startup (Process: "
              << process_type << ")";
    } else {
      enable_memory_tagging = ShouldEnableMemoryTagging(process_type);
#if BUILDFLAG(IS_ANDROID)
      // Android Scudo does not allow MTE to be re-enabled if MTE was disabled.
      if (enable_memory_tagging &&
          startup_memory_tagging_reporting_mode ==
              partition_alloc::TagViolationReportingMode::kDisabled) {
        LOG(ERROR) << "PartitionAlloc: Failed to enable memory tagging due to "
                      "MTE disabled at startup (Process: "
                   << process_type << ")";
        debug::DumpWithoutCrashing();
        enable_memory_tagging = false;
      }

      if (enable_memory_tagging) {
        // Configure MTE.
        switch (base::features::kMemtagModeParam.Get()) {
          case base::features::MemtagMode::kSync:
            memory_tagging_reporting_mode =
                partition_alloc::TagViolationReportingMode::kSynchronous;
            break;
          case base::features::MemtagMode::kAsync:
            memory_tagging_reporting_mode =
                partition_alloc::TagViolationReportingMode::kAsynchronous;
            break;
        }
        bool enable_permissive_mte = base::FeatureList::IsEnabled(
            base::features::kPartitionAllocPermissiveMte);
        partition_alloc::PermissiveMte::SetEnabled(enable_permissive_mte);
        CHECK(partition_alloc::internal::
                  ChangeMemoryTaggingModeForAllThreadsPerProcess(
                      memory_tagging_reporting_mode));
        CHECK_EQ(
            partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
            memory_tagging_reporting_mode);
        VLOG(1)
            << "PartitionAlloc: Memory tagging enabled in "
            << (memory_tagging_reporting_mode ==
                        partition_alloc::TagViolationReportingMode::kSynchronous
                    ? "SYNC"
                    : "ASYNC")
            << " mode (Process: " << process_type << ")";
        if (enable_permissive_mte) {
          VLOG(1) << "PartitionAlloc: Permissive MTE enabled (Process: "
                  << process_type << ")";
        }
      } else if (base::CPU::GetInstanceNoAllocation().has_mte()) {
        // Disable MTE.
        memory_tagging_reporting_mode =
            partition_alloc::TagViolationReportingMode::kDisabled;
        CHECK(partition_alloc::internal::
                  ChangeMemoryTaggingModeForAllThreadsPerProcess(
                      memory_tagging_reporting_mode));
        CHECK_EQ(
            partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
            memory_tagging_reporting_mode);
        VLOG(1) << "PartitionAlloc: Memory tagging disabled (Process: "
                << process_type << ")";
      }
#endif  // BUILDFLAG(IS_ANDROID)
    }
  }
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)

  allocator_shim::UseSmallSingleSlotSpans use_small_single_slot_spans(
      base::FeatureList::IsEnabled(
          features::kPartitionAllocUseSmallSingleSlotSpans));

  allocator_shim::ConfigurePartitions(
      allocator_shim::EnableBrp(brp_config.enable_brp),
      allocator_shim::EnableMemoryTagging(enable_memory_tagging),
      memory_tagging_reporting_mode, bucket_distribution,
      allocator_shim::SchedulerLoopQuarantine(scheduler_loop_quarantine),
      scheduler_loop_quarantine_branch_capacity_in_bytes,
      allocator_shim::ZappingByFreeFlags(zapping_by_free_flags),
      allocator_shim::EventuallyZeroFreedMemory(eventually_zero_freed_memory),
      allocator_shim::UsePoolOffsetFreelists(use_pool_offset_freelists),
      use_small_single_slot_spans);

  const uint32_t extras_size = allocator_shim::GetMainPartitionRootExtrasSize();
  // As per description, extras are optional and are expected not to
  // exceed (cookie + max(BRP ref-count)) == 16 + 16 == 32 bytes.
  // 100 is a reasonable cap for this value.
  UmaHistogramCounts100("Memory.PartitionAlloc.PartitionRoot.ExtrasSize",
                        int(extras_size));

  partition_alloc::internal::StackTopRegistry::Get().NotifyThreadCreated(
      partition_alloc::internal::GetStackTop());

  allocator_shim::internal::PartitionAllocMalloc::Allocator()
      ->EnableThreadCacheIfSupported();

  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocLargeEmptySlotSpanRing)) {
    allocator_shim::internal::PartitionAllocMalloc::Allocator()
        ->EnableLargeEmptySlotSpanRing();
  }

#if PA_BUILDFLAG( \
    ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
  bool enable_pa_with_advanced_checks =
      ShouldEnablePartitionAllocWithAdvancedChecks(process_type);
  if (enable_pa_with_advanced_checks) {
    allocator_shim::InstallCustomDispatchForPartitionAllocWithAdvancedChecks();
  }
#endif  // PA_BUILDFLAG(
        // ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if BUILDFLAG(IS_WIN)
  // Browser process only, since this is the one we want to prevent from
  // crashing the most (as it takes down all the tabs).
  if (base::FeatureList::IsEnabled(
          base::features::kPageAllocatorRetryOnCommitFailure) &&
      process_type.empty()) {
    partition_alloc::SetRetryOnCommitFailure(true);
  }
#endif
}

void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);

    // Init only once.
    if (called_after_thread_pool_init_) {
      return;
    }

    DCHECK_EQ(established_process_type_, process_type);
    // Enforce ordering.
    DCHECK(called_earlyish_);
    DCHECK(called_after_feature_list_init_);

    called_after_thread_pool_init_ = true;
  }

#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
    PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // This should be called in specific processes, as the main thread is
  // initialized later.
  DCHECK(process_type != switches::kZygoteProcess);

  partition_alloc::ThreadCacheRegistry::Instance().SetPurgingConfiguration(
      base::features::GetThreadCacheMinPurgeInterval(),
      base::features::GetThreadCacheMaxPurgeInterval(),
      base::features::GetThreadCacheDefaultPurgeInterval(),
      size_t(base::features::GetThreadCacheMinCachedMemoryForPurgingBytes()));

  base::allocator::StartThreadCachePeriodicPurge();

  if (base::FeatureList::IsEnabled(
          base::features::kEnableConfigurableThreadCacheMultiplier)) {
    // If kEnableConfigurableThreadCacheMultiplier is enabled, override the
    // multiplier value with the corresponding feature param.
#if BUILDFLAG(IS_ANDROID)
    ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
        base::features::GetThreadCacheMultiplierForAndroid());
#else   // BUILDFLAG(IS_ANDROID)
    ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
        base::features::GetThreadCacheMultiplier());
#endif  // BUILDFLAG(IS_ANDROID)
  } else {
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
    // If kEnableConfigurableThreadCacheMultiplier is not enabled, lower the
    // thread cache limits on Android low-end devices to avoid stranding too
    // much memory in the caches.
    if (SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
            features::kPartialLowEndModeExcludePartitionAllocSupport)) {
      ::partition_alloc::ThreadCacheRegistry::Instance()
          .SetThreadCacheMultiplier(
              ::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
    }
#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
  }

  // Renderer processes are more performance-sensitive, so increase thread
  // cache limits.
  if (process_type == switches::kRendererProcess &&
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocLargeThreadCacheSize)) {
    largest_cached_size_ =
        size_t(base::features::GetPartitionAllocLargeThreadCacheSizeValue());

#if BUILDFLAG(IS_ANDROID)
    // Use an appropriately lower amount for Android devices with 3GB or less.
    // Devices almost always report less physical memory than what they
    // actually have, so use 3.2GB (a threshold commonly used throughout the
    // code) to avoid accidentally catching devices advertised as 4GB.
    if (base::SysInfo::AmountOfPhysicalMemoryMB() < 3.2 * 1024) {
      largest_cached_size_ = size_t(
          base::features::
              GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid());
    }
#endif  // BUILDFLAG(IS_ANDROID)

    ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
        // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  base::allocator::StartMemoryReclaimer(
      base::SingleThreadTaskRunner::GetCurrentDefault());
#endif

  partition_alloc::PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocStraightenLargerSlotSpanFreeLists)
          ? features::kPartitionAllocStraightenLargerSlotSpanFreeListsMode.Get()
          : partition_alloc::StraightenLargerSlotSpanFreeListsMode::kNever);
  partition_alloc::PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocSortSmallerSlotSpanFreeLists));
  partition_alloc::PartitionRoot::SetSortActiveSlotSpansEnabled(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocSortActiveSlotSpans));

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  if (ShouldEnableShadowMetadata(process_type)) {
    partition_alloc::PartitionRoot::EnableShadowMetadata(
        partition_alloc::internal::PoolHandleMask::kRegular |
        partition_alloc::internal::PoolHandleMask::kBRP);
  }
#endif  // PA_CONFIG(ENABLE_SHADOW_METADATA)
}

void PartitionAllocSupport::OnForegrounded(bool has_main_frame) {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Other changes are renderer-only, not this one.
  MemoryReclaimerSupport::Instance().SetForegrounded(true);

  {
    base::AutoLock scoped_lock(lock_);
    if (established_process_type_ != switches::kRendererProcess) {
      return;
    }
  }
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
  if (!base::FeatureList::IsEnabled(
          features::kLowerPAMemoryLimitForNonMainRenderers) ||
      has_main_frame) {
    ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
  if (base::FeatureList::IsEnabled(
          features::kPartitionAllocAdjustSizeWhenInForeground)) {
    allocator_shim::AdjustDefaultAllocatorForForeground();
  }
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

void PartitionAllocSupport::OnBackgrounded() {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Other changes are renderer-only, not this one.
  MemoryReclaimerSupport::Instance().SetForegrounded(false);

  {
    base::AutoLock scoped_lock(lock_);
    if (established_process_type_ != switches::kRendererProcess) {
      return;
    }
  }
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
  // Performance matters less for background renderers, don't pay the memory
  // cost.
  ::partition_alloc::ThreadCache::SetLargestCachedSize(
      ::partition_alloc::kThreadCacheDefaultSizeThreshold);

  // In renderers, memory reclaim uses the "idle time" task runner to run
  // periodic reclaim. This does not always run when the renderer is idle, and
  // in particular after the renderer gets backgrounded. As a result, empty
  // slot spans are potentially never decommitted. To mitigate that, run a
  // one-off reclaim a few seconds later. Even if the renderer comes back to
  // foreground in the meantime, the worst case is a few more system calls.
  //
  // TODO(lizeb): Remove once/if the behavior of idle tasks changes.
  base::PostDelayedMemoryReductionTask(
      base::SingleThreadTaskRunner::GetCurrentDefault(), FROM_HERE,
      base::BindOnce(
          [] { ::partition_alloc::MemoryReclaimer::Instance()->ReclaimAll(); }),
      base::Seconds(10));

#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
  if (base::FeatureList::IsEnabled(
          features::kPartitionAllocAdjustSizeWhenInForeground)) {
    allocator_shim::AdjustDefaultAllocatorForBackground();
  }
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
std::string PartitionAllocSupport::ExtractDanglingPtrSignatureForTests(
    std::string stacktrace) {
  return ExtractDanglingPtrSignature(stacktrace);
}
#endif

}  // namespace base::allocator