// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"

#include <atomic>
#include <cstddef>
#include <map>
#include <string>
#include <tuple>

#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
#include "base/memory/nonscannable_memory.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif

using allocator_shim::AllocatorDispatch;

namespace {

class SimpleScopedSpinLocker {
 public:
  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
    // Lock. Semantically equivalent to base::Lock::Acquire().
    bool expected = false;
    // Weak CAS since we are in a retry loop, relaxed ordering for failure
    // since in this case we don't imply any ordering.
    //
    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
    while (!lock_.compare_exchange_weak(expected, true,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed)) {
      expected = false;
    }
  }

  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool>& lock_;
};
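
// Usage sketch for the spin lock above (illustrative only, not part of the
// shim): the guard acquires the lock in its constructor and releases it when
// it goes out of scope, mirroring base::AutoLock.
//
//   std::atomic<bool> g_lock{false};
//   {
//     SimpleScopedSpinLocker scoped_lock{g_lock};
//     // ... critical section; the lock is released at the closing brace.
//   }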

// We can't use a "static local" or a base::LazyInstance, as:
// - static local variables call into the runtime on Windows, which is not
//   prepared to handle it, as the first allocation happens during CRT init.
// - We don't want to depend on base::LazyInstance, which may be converted to
//   static locals one day.
//
// Nevertheless, this provides essentially the same thing.
template <typename T, typename Constructor>
class LeakySingleton {
 public:
  constexpr LeakySingleton() = default;

  PA_ALWAYS_INLINE T* Get() {
    auto* instance = instance_.load(std::memory_order_acquire);
    if (PA_LIKELY(instance))
      return instance;

    return GetSlowPath();
  }

  // Replaces the instance pointer with a new one.
  void Replace(T* new_instance) {
    SimpleScopedSpinLocker scoped_lock{initialization_lock_};

    // Modify under the lock to avoid race between |if (instance)| and
    // |instance_.store()| in GetSlowPath().
    instance_.store(new_instance, std::memory_order_release);
  }

 private:
  T* GetSlowPath();

  std::atomic<T*> instance_;
  // Before C++20, having an initializer here causes a "variable does not have
  // a constant initializer" error. In C++20, omitting it causes a similar
  // error. Presumably this is due to the C++20 changes to make atomic
  // initialization (of the other members of this class) sane, so guarding
  // under that feature-test.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  alignas(T) uint8_t instance_buffer_[sizeof(T)];
#else
  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
#endif
  std::atomic<bool> initialization_lock_;
};

template <typename T, typename Constructor>
T* LeakySingleton<T, Constructor>::GetSlowPath() {
  // The instance has not been set, the proper way to proceed (correct
  // double-checked locking) is:
  //
  // auto* instance = instance_.load(std::memory_order_acquire);
  // if (!instance) {
  //   ScopedLock initialization_lock;
  //   root = instance_.load(std::memory_order_relaxed);
  //   if (root)
  //     return root;
  //   instance = Create new root;
  //   instance_.store(instance, std::memory_order_release);
  //   return instance;
  // }
  //
  // However, we don't want to use a base::Lock here, so instead we use
  // compare-and-exchange on a lock variable, which provides the same
  // guarantees.
  SimpleScopedSpinLocker scoped_lock{initialization_lock_};

  T* instance = instance_.load(std::memory_order_relaxed);
  // Someone beat us.
  if (instance)
    return instance;

  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
  instance_.store(instance, std::memory_order_release);

  return instance;
}
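
// Illustrative sketch (not part of the shim): a Constructor policy for
// LeakySingleton is any type exposing `static T* New(void* buffer)` that
// placement-constructs a T inside `buffer`, e.g.:
//
//   struct IntConstructor {
//     static int* New(void* buffer) { return new (buffer) int(42); }
//   };
//   LeakySingleton<int, IntConstructor> g_the_int PA_CONSTINIT = {};
//
// MainPartitionConstructor below follows this contract.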

class MainPartitionConstructor {
 public:
  static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
    constexpr partition_alloc::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Additional partitions may be created in ConfigurePartitions(). Since
        // only one partition can have thread cache enabled, postpone the
        // decision to turn the thread cache on until after that call.
        // TODO(bartekn): Enable it here by default, once the "split-only" mode
        // is no longer needed.
        partition_alloc::PartitionOptions::ThreadCache::kDisabled;
#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Other tests, such as the ThreadCache tests, create a thread cache,
        // and only one is supported at a time.
        partition_alloc::PartitionOptions::ThreadCache::kDisabled;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({
        partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
        thread_cache,
        partition_alloc::PartitionOptions::Quarantine::kAllowed,
        partition_alloc::PartitionOptions::Cookie::kAllowed,
        partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
        partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
        partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
    });

    return new_root;
  }
};

LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
               MainPartitionConstructor>
    g_root PA_CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* Allocator() {
  return g_root.Get();
}

// Original g_root_ if it was replaced by ConfigurePartitions().
std::atomic<partition_alloc::ThreadSafePartitionRoot*> g_original_root(nullptr);

class AlignedPartitionConstructor {
 public:
  static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
    return g_root.Get();
  }
};

LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
               AlignedPartitionConstructor>
    g_aligned_root PA_CONSTINIT = {};

partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() {
  return g_original_root.load(std::memory_order_relaxed);
}

partition_alloc::ThreadSafePartitionRoot* AlignedAllocator() {
  return g_aligned_root.Get();
}

void* AllocateAlignedMemory(size_t alignment, size_t size) {
  // Memory returned by the regular allocator *always* respects |kAlignment|,
  // which is a power of two, and any valid alignment is also a power of two.
  // So we can directly fulfill these requests with the main allocator.
  //
  // This has several advantages:
  // - The thread cache is supported on the main partition
  // - Reduced fragmentation
  // - Better coverage for MiraclePtr variants requiring extras
  //
  // There are several call sites in Chromium where base::AlignedAlloc is
  // called with a small alignment. Some may be due to overly-careful code,
  // some are because the client code doesn't know the required alignment at
  // compile time.
  //
  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
  // instance) directly call PartitionFree(), so there is no risk of
  // mismatch. (see below the default_dispatch definition).
  if (alignment <= partition_alloc::internal::kAlignment) {
    // This is mandated by |posix_memalign()| and friends, so should never
    // fire.
    PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
    // TODO(bartekn): See if the compiler optimizes branches down the stack on
    // Mac, where PartitionPageSize() isn't constexpr.
    return Allocator()->AllocWithFlagsNoHooks(
        0, size, partition_alloc::PartitionPageSize());
  }

  return AlignedAllocator()->AlignedAllocWithFlags(
      partition_alloc::AllocFlags::kNoHooks, alignment, size);
}
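
// Illustrative examples of the routing above (values assume a
// partition_alloc::internal::kAlignment of 16 bytes; the exact constant is
// platform-dependent):
//
//   AllocateAlignedMemory(8, 128);   // served by the main partition
//   AllocateAlignedMemory(64, 128);  // served by AlignedAllocator() via
//                                    // AlignedAllocWithFlags()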

}  // namespace

namespace allocator_shim::internal {

namespace {
#if BUILDFLAG(IS_APPLE)
unsigned int g_alloc_flags = 0;
#else
constexpr unsigned int g_alloc_flags = 0;
#endif
}  // namespace

void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
#if BUILDFLAG(IS_APPLE)
  // We generally prefer to always crash rather than returning nullptr for
  // OOM. However, on some macOS releases, we have to locally allow it due to
  // weirdness in OS code. See https://crbug.com/654695 for details.
  //
  // Apple only since it's not needed elsewhere, and there is a performance
  // penalty.

  if (value)
    g_alloc_flags = 0;
  else
    g_alloc_flags = partition_alloc::AllocFlags::kReturnNull;
#endif
}

void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return Allocator()->AllocWithFlagsNoHooks(
      g_alloc_flags, size, partition_alloc::PartitionPageSize());
}

void* PartitionMallocUnchecked(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return Allocator()->AllocWithFlagsNoHooks(
      partition_alloc::AllocFlags::kReturnNull | g_alloc_flags, size,
      partition_alloc::PartitionPageSize());
}

void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  const size_t total =
      partition_alloc::internal::base::CheckMul(n, size).ValueOrDie();
  return Allocator()->AllocWithFlagsNoHooks(
      partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
      partition_alloc::PartitionPageSize());
}
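
// Illustrative: CheckMul() above guards against the classic calloc() overflow.
// For example, a request such as
//
//   void* p = calloc(std::numeric_limits<size_t>::max() / 2, 4);
//
// must not silently wrap around to a small allocation; with ValueOrDie() the
// shim crashes instead of returning an undersized block.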

void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
                            size_t size,
                            size_t alignment,
                            void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

// aligned_realloc documentation is
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always frees the original memory block and allocates a new
// memory block.
// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocWithFlags
// and use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  void* new_ptr = nullptr;
  if (size > 0) {
    new_ptr = AllocateAlignedMemory(alignment, size);
  } else {
    // size == 0 and address != null means just "free(address)".
    if (address)
      partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  // The original memory block (specified by address) is unchanged if ENOMEM.
  if (!new_ptr)
    return nullptr;
  // TODO(tasak): Need to compare the new alignment with the address'
  // alignment. If the two alignments are not the same, need to return nullptr
  // with EINVAL.
  if (address) {
    size_t usage =
        partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
    size_t copy_size = usage > size ? size : usage;
    memcpy(new_ptr, address, copy_size);

    partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  return new_ptr;
}
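
// Illustrative summary of the _aligned_realloc-style contract implemented
// above (d and ctx stand for arbitrary dispatch/context values):
//
//   PartitionAlignedRealloc(d, nullptr, 64, 16, ctx);  // plain aligned alloc
//   PartitionAlignedRealloc(d, p, 0, 16, ctx);         // frees |p|, returns
//                                                      // nullptr
//   PartitionAlignedRealloc(d, p, 64, 16, ctx);        // alloc + copy + free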

void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(address)) &&
                  address)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `realloc` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return realloc(address, size);
  }
#endif  // BUILDFLAG(IS_APPLE)

  return Allocator()->ReallocWithFlags(
      partition_alloc::AllocFlags::kNoHooks | g_alloc_flags, address, size, "");
}

#if BUILDFLAG(IS_CAST_ANDROID)
extern "C" {
void __real_free(void*);
}  // extern "C"
#endif  // BUILDFLAG(IS_CAST_ANDROID)

void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  // TODO(bartekn): Add MTE unmasking here (and below).
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(object)) &&
                  object)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `free` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return free(object);
  }
#endif  // BUILDFLAG(IS_APPLE)

  // On Android Chromecast devices, there is at least one case where a system
  // malloc() pointer can be passed to PartitionAlloc's free(). If we don't own
  // the pointer, pass it along. This should not have a runtime cost vs regular
  // Android, since on Android we have a PA_CHECK() rather than the branch
  // here.
#if BUILDFLAG(IS_CAST_ANDROID)
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(object)) &&
                  object)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `free()`, which is `__real_free()`
    // here.
    return __real_free(object);
  }
#endif  // BUILDFLAG(IS_CAST_ANDROID)

  partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(object);
}

#if BUILDFLAG(IS_APPLE)
// Normal free() path on Apple OSes:
// 1. size = GetSizeEstimate(ptr);
// 2. if (size) FreeDefiniteSize(ptr, size)
//
// So we don't need to re-check that the pointer is owned in Free(), and we
// can use the size.
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
                               void* address,
                               size_t size,
                               void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
  // still useful though, as we avoid double-checking that the address is
  // owned.
  partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
}
#endif  // BUILDFLAG(IS_APPLE)

size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  // This is used to implement malloc_usable_size(3). Per its man page, "if ptr
  // is NULL, 0 is returned".
  if (!address)
    return 0;

#if BUILDFLAG(IS_APPLE)
  if (!partition_alloc::IsManagedByPartitionAlloc(
          reinterpret_cast<uintptr_t>(address))) {
    // The object pointed to by `address` is not allocated by PartitionAlloc.
    // The return value `0` means that the pointer does not belong to this
    // malloc zone.
    return 0;
  }
#endif  // BUILDFLAG(IS_APPLE)

  // TODO(lizeb): Returns incorrect values for aligned allocations.
  const size_t size = partition_alloc::ThreadSafePartitionRoot::
      GetUsableSizeWithMac11MallocSizeHack(address);
#if BUILDFLAG(IS_APPLE)
  // The object pointed to by `address` is allocated by PartitionAlloc, so this
  // function must not return zero, so that the malloc zone dispatcher finds
  // the appropriate malloc zone.
  PA_DCHECK(size);
#endif  // BUILDFLAG(IS_APPLE)
  return size;
}
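
// Illustrative: since the function above backs malloc_usable_size(3), the
// returned value may exceed the requested size, e.g.
//
//   void* p = malloc(3);
//   size_t usable = malloc_usable_size(p);  // >= 3, typically the slot size
//                                           // of the bucket |p| landed in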

#if BUILDFLAG(IS_APPLE)
bool PartitionClaimedAddress(const AllocatorDispatch*,
                             void* address,
                             void* context) {
  return partition_alloc::IsManagedByPartitionAlloc(
      reinterpret_cast<uintptr_t>(address));
}
#endif  // BUILDFLAG(IS_APPLE)

unsigned PartitionBatchMalloc(const AllocatorDispatch*,
                              size_t size,
                              void** results,
                              unsigned num_requested,
                              void* context) {
  // No real batching: we could, for instance, acquire the lock only once.
  // Keep it simple for now.
  for (unsigned i = 0; i < num_requested; i++) {
    // No need to check the results, we crash if it fails.
    results[i] = PartitionMalloc(nullptr, size, nullptr);
  }

  // Either all succeeded, or we crashed.
  return num_requested;
}

void PartitionBatchFree(const AllocatorDispatch*,
                        void** to_be_freed,
                        unsigned num_to_be_freed,
                        void* context) {
  // No real batching: we could, for instance, acquire the lock only once.
  // Keep it simple for now.
  for (unsigned i = 0; i < num_to_be_freed; i++) {
    PartitionFree(nullptr, to_be_freed[i], nullptr);
  }
}

#if BUILDFLAG(IS_APPLE)
void PartitionTryFreeDefault(const AllocatorDispatch*,
                             void* address,
                             void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};

  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
          reinterpret_cast<uintptr_t>(address)))) {
    // The object pointed to by `address` is not allocated by PartitionAlloc.
    // Call find_zone_and_free.
    return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
  }

  partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
}
#endif  // BUILDFLAG(IS_APPLE)

// static
partition_alloc::ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
  return ::Allocator();
}

// static
partition_alloc::ThreadSafePartitionRoot*
PartitionAllocMalloc::OriginalAllocator() {
  return ::OriginalAllocator();
}

// static
partition_alloc::ThreadSafePartitionRoot*
PartitionAllocMalloc::AlignedAllocator() {
  return ::AlignedAllocator();
}

}  // namespace allocator_shim::internal

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

namespace allocator_shim {

void EnablePartitionAllocMemoryReclaimer() {
  // Unlike other partitions, Allocator() and AlignedAllocator() do not
  // register their PartitionRoots to the memory reclaimer, because doing so
  // may allocate memory. Thus, the registration to the memory reclaimer has
  // to be done some time later, when the main root is fully configured.
  // TODO(bartekn): Aligned allocator can use the regular initialization path.
  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
      Allocator());
  auto* original_root = OriginalAllocator();
  if (original_root)
    ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
        original_root);
  if (AlignedAllocator() != Allocator()) {
    ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
        AlignedAllocator());
  }
}

void ConfigurePartitions(
    EnableBrp enable_brp,
    EnableBrpZapping enable_brp_zapping,
    EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
    SplitMainPartition split_main_partition,
    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
    AddDummyRefCount add_dummy_ref_count,
    AlternateBucketDistribution use_alternate_bucket_distribution) {
  // BRP cannot be enabled without splitting the main partition. Furthermore,
  // in the "before allocation" mode, it can't be enabled without further
  // splitting out the aligned partition.
  PA_CHECK(!enable_brp || split_main_partition);
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
#endif
  // Can't split out the aligned partition, without splitting the main one.
  PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);

  static bool configured = false;
  PA_CHECK(!configured);
  configured = true;

  // Calling Get() is actually important, even if the return values aren't
  // used, because it has the side effect of initializing the variables if
  // they weren't initialized already.
  auto* current_root = g_root.Get();
  auto* current_aligned_root = g_aligned_root.Get();

  if (!split_main_partition) {
    switch (use_alternate_bucket_distribution) {
      case AlternateBucketDistribution::kDefault:
        // We start in the 'default' case.
        break;
      case AlternateBucketDistribution::kDenser:
        current_root->SwitchToDenserBucketDistribution();
        current_aligned_root->SwitchToDenserBucketDistribution();
        break;
    }
    PA_DCHECK(!enable_brp);
    PA_DCHECK(!use_dedicated_aligned_partition);
    PA_DCHECK(!current_root->flags.with_thread_cache);
    return;
  }

  // We've been bitten before by using a static local when initializing a
  // partition. For synchronization, static local variables call into the
  // runtime on Windows, which may not be ready to handle it, if the path is
  // invoked on an allocation during the runtime initialization.
  // ConfigurePartitions() is invoked explicitly from Chromium code, so this
  // shouldn't bite us here. Mentioning just in case we move this code earlier.
  static partition_alloc::internal::base::NoDestructor<
      partition_alloc::ThreadSafePartitionRoot>
      new_main_partition(partition_alloc::PartitionOptions(
          !use_dedicated_aligned_partition
              ? partition_alloc::PartitionOptions::AlignedAlloc::kAllowed
              : partition_alloc::PartitionOptions::AlignedAlloc::kDisallowed,
          partition_alloc::PartitionOptions::ThreadCache::kDisabled,
          partition_alloc::PartitionOptions::Quarantine::kAllowed,
          partition_alloc::PartitionOptions::Cookie::kAllowed,
          enable_brp
              ? partition_alloc::PartitionOptions::BackupRefPtr::kEnabled
              : partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
          enable_brp_zapping
              ? partition_alloc::PartitionOptions::BackupRefPtrZapping::kEnabled
              : partition_alloc::PartitionOptions::BackupRefPtrZapping::
                    kDisabled,
          partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
          add_dummy_ref_count
              ? partition_alloc::PartitionOptions::AddDummyRefCount::kEnabled
              : partition_alloc::PartitionOptions::AddDummyRefCount::
                    kDisabled));
  partition_alloc::ThreadSafePartitionRoot* new_root = new_main_partition.get();

  partition_alloc::ThreadSafePartitionRoot* new_aligned_root;
  if (use_dedicated_aligned_partition) {
    // TODO(bartekn): Use the original root instead of creating a new one. It'd
    // result in one less partition, but come at a cost of commingling types.
    static partition_alloc::internal::base::NoDestructor<
        partition_alloc::ThreadSafePartitionRoot>
        new_aligned_partition(partition_alloc::PartitionOptions{
            partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
            partition_alloc::PartitionOptions::ThreadCache::kDisabled,
            partition_alloc::PartitionOptions::Quarantine::kAllowed,
            partition_alloc::PartitionOptions::Cookie::kAllowed,
            partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
            partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
            partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
        });
    new_aligned_root = new_aligned_partition.get();
  } else {
    // The new main root can also support AlignedAlloc.
    new_aligned_root = new_root;
  }

  // Now switch traffic to the new partitions.
  g_aligned_root.Replace(new_aligned_root);
  g_root.Replace(new_root);

  // g_original_root has to be set after g_root, because other code doesn't
  // handle the case where both point to the same root well.
  // TODO(bartekn): Reorder, once handled well. It isn't ideal for one
  // partition to be invisible temporarily.
  g_original_root = current_root;

  // No need for g_original_aligned_root, because in cases where g_aligned_root
  // is replaced, it must've been g_original_root.
  PA_CHECK(current_aligned_root == g_original_root);

  if (enable_brp_memory_reclaimer) {
    partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(new_root);
    if (new_aligned_root != new_root) {
      partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
          new_aligned_root);
    }
  }

  // Purge memory, now that the traffic to the original partition is cut off.
  current_root->PurgeMemory(
      partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
      partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);

  switch (use_alternate_bucket_distribution) {
    case AlternateBucketDistribution::kDefault:
      // We start in the 'default' case.
      break;
    case AlternateBucketDistribution::kDenser:
      g_root.Get()->SwitchToDenserBucketDistribution();
      g_aligned_root.Get()->SwitchToDenserBucketDistribution();
      break;
  }
}
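
// Illustrative call site (a sketch only; how the strong-typedef parameters
// are constructed and which values are passed depend on the embedder's
// feature configuration):
//
//   allocator_shim::ConfigurePartitions(
//       allocator_shim::EnableBrp(true),
//       allocator_shim::EnableBrpZapping(false),
//       allocator_shim::EnableBrpPartitionMemoryReclaimer(false),
//       allocator_shim::SplitMainPartition(true),
//       allocator_shim::UseDedicatedAlignedPartition(true),
//       allocator_shim::AddDummyRefCount(false),
//       allocator_shim::AlternateBucketDistribution::kDefault);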

#if BUILDFLAG(USE_STARSCAN)
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
  partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
      &::base::PlatformThread::SetName);
  partition_alloc::internal::PCScan::Initialize(config);

  partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
  if (OriginalAllocator() != nullptr)
    partition_alloc::internal::PCScan::RegisterScannableRoot(
        OriginalAllocator());
  if (Allocator() != AlignedAllocator())
    partition_alloc::internal::PCScan::RegisterScannableRoot(
        AlignedAllocator());

  base::internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
  base::internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
}
#endif  // BUILDFLAG(USE_STARSCAN)
}  // namespace allocator_shim

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &allocator_shim::internal::PartitionMalloc,  // alloc_function
    &allocator_shim::internal::
        PartitionMallocUnchecked,  // alloc_unchecked_function
    &allocator_shim::internal::
        PartitionCalloc,  // alloc_zero_initialized_function
    &allocator_shim::internal::PartitionMemalign,  // alloc_aligned_function
    &allocator_shim::internal::PartitionRealloc,   // realloc_function
    &allocator_shim::internal::PartitionFree,      // free_function
    &allocator_shim::internal::
        PartitionGetSizeEstimate,  // get_size_estimate_function
#if BUILDFLAG(IS_APPLE)
    &allocator_shim::internal::PartitionClaimedAddress,  // claimed_address
#else
    nullptr,  // claimed_address
#endif
    &allocator_shim::internal::PartitionBatchMalloc,  // batch_malloc_function
    &allocator_shim::internal::PartitionBatchFree,    // batch_free_function
#if BUILDFLAG(IS_APPLE)
    // On Apple OSes, free_definite_size() is always called from free(), since
    // get_size_estimate() is used to determine whether an allocation belongs
    // to the current zone. It makes sense to optimize for it.
    &allocator_shim::internal::PartitionFreeDefiniteSize,
    // On Apple OSes, try_free_default() is sometimes called as an optimization
    // of free().
    &allocator_shim::internal::PartitionTryFreeDefault,
#else
    nullptr,  // free_definite_size_function
    nullptr,  // try_free_default_function
#endif
    &allocator_shim::internal::
        PartitionAlignedAlloc,  // aligned_malloc_function
    &allocator_shim::internal::
        PartitionAlignedRealloc,  // aligned_realloc_function
    &allocator_shim::internal::PartitionFree,  // aligned_free_function
    nullptr,  // next
};

// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.

extern "C" {

#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}

#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  partition_alloc::SimplePartitionStatsDumper allocator_dumper;
  Allocator()->DumpStats("malloc", true, &allocator_dumper);
  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.

  partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
  if (AlignedAllocator() != Allocator()) {
    AlignedAllocator()->DumpStats("posix_memalign", true,
                                  &aligned_allocator_dumper);
  }

  // Dump stats for nonscannable and nonquarantinable allocators.
  auto& nonscannable_allocator =
      base::internal::NonScannableAllocator::Instance();
  partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
  if (auto* nonscannable_root = nonscannable_allocator.root())
    nonscannable_root->DumpStats("malloc", true,
                                 &nonscannable_allocator_dumper);
  auto& nonquarantinable_allocator =
      base::internal::NonQuarantinableAllocator::Instance();
  partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
  if (auto* nonquarantinable_root = nonquarantinable_allocator.root())
    nonquarantinable_root->DumpStats("malloc", true,
                                     &nonquarantinable_allocator_dumper);

  struct mallinfo info = {0};
  info.arena = 0;  // Memory *not* allocated with mmap().

  // Memory allocated with mmap(), aka virtual size.
  info.hblks =
      partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
          allocator_dumper.stats().total_mmapped_bytes +
          aligned_allocator_dumper.stats().total_mmapped_bytes +
          nonscannable_allocator_dumper.stats().total_mmapped_bytes +
          nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
  // Resident bytes.
  info.hblkhd =
      partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
          allocator_dumper.stats().total_resident_bytes +
          aligned_allocator_dumper.stats().total_resident_bytes +
          nonscannable_allocator_dumper.stats().total_resident_bytes +
          nonquarantinable_allocator_dumper.stats().total_resident_bytes);
  // Allocated bytes.
  info.uordblks =
      partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
          allocator_dumper.stats().total_active_bytes +
          aligned_allocator_dumper.stats().total_active_bytes +
          nonscannable_allocator_dumper.stats().total_active_bytes +
          nonquarantinable_allocator_dumper.stats().total_active_bytes);

  return info;
}
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)

}  // extern "C"

#if BUILDFLAG(IS_APPLE)

namespace allocator_shim {

void InitializeDefaultAllocatorPartitionRoot() {
  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
  // internally, e.g. __builtin_available, and it's not easy to avoid it.
  // Thus, we initialize the PartitionRoot using the system default allocator
  // before we intercept the system default allocator.
  std::ignore = Allocator();
}

}  // namespace allocator_shim

#endif  // BUILDFLAG(IS_APPLE)

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)