// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"

#include <atomic>
#include <bit>
#include <cstddef>
#include <map>
#include <string>
#include <tuple>

#include "build/build_config.h"
#include "partition_alloc/allocation_guard.h"
#include "partition_alloc/chromecast_buildflags.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_base/numerics/checked_math.h"
#include "partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/shim/allocator_shim_internals.h"
#include "partition_alloc/shim/nonscannable_allocator.h"

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif

using allocator_shim::AllocatorDispatch;

namespace {

class SimpleScopedSpinLocker {
 public:
  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
    // Lock. Semantically equivalent to base::Lock::Acquire().
    bool expected = false;
    // Weak CAS since we are in a retry loop; relaxed ordering for failure,
    // since in that case we don't imply any ordering.
    //
    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
    while (!lock_.compare_exchange_weak(
        expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
      expected = false;
    }
  }

  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool>& lock_;
};
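
// Illustrative usage sketch (added for clarity; not exercised by the allocator
// paths below). The locker spins on a caller-owned std::atomic<bool> and
// releases it on scope exit:
//
//   std::atomic<bool> lock{false};
//   {
//     SimpleScopedSpinLocker scoped_lock{lock};
//     // Critical section.
//   }  // |lock| is released here.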

// We can't use a "static local" or a base::LazyInstance, as:
// - static local variables call into the runtime on Windows, which is not
//   prepared to handle it, as the first allocation happens during CRT init.
// - We don't want to depend on base::LazyInstance, which may be converted to
//   static locals one day.
//
// Nevertheless, this provides essentially the same thing.
template <typename T, typename Constructor>
class LeakySingleton {
 public:
  constexpr LeakySingleton() = default;

  PA_ALWAYS_INLINE T* Get() {
    auto* instance = instance_.load(std::memory_order_acquire);
    if (PA_LIKELY(instance)) {
      return instance;
    }

    return GetSlowPath();
  }

  // Replaces the instance pointer with a new one.
  void Replace(T* new_instance) {
    SimpleScopedSpinLocker scoped_lock{initialization_lock_};

    // Modify under the lock to avoid race between |if (instance)| and
    // |instance_.store()| in GetSlowPath().
    instance_.store(new_instance, std::memory_order_release);
  }

 private:
  T* GetSlowPath();

  std::atomic<T*> instance_;
  // Before C++20, having an initializer here causes a "variable does not have a
  // constant initializer" error.  In C++20, omitting it causes a similar error.
  // Presumably this is due to the C++20 changes to make atomic initialization
  // (of the other members of this class) sane, so guarding under that
  // feature-test.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  alignas(T) uint8_t instance_buffer_[sizeof(T)];
#else
  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
#endif
  std::atomic<bool> initialization_lock_;
};
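
// Note on the Constructor policy (inferred from the uses below): it must
// provide a static `T* New(void* buffer)` that either placement-news a T into
// |buffer| or returns an already-existing instance; see
// MainPartitionConstructor and AlignedPartitionConstructor.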

template <typename T, typename Constructor>
T* LeakySingleton<T, Constructor>::GetSlowPath() {
  // The instance has not been set; the proper way to proceed (correct
  // double-checked locking) is:
  //
  // auto* instance = instance_.load(std::memory_order_acquire);
  // if (!instance) {
  //   ScopedLock initialization_lock;
  //   root = instance_.load(std::memory_order_relaxed);
  //   if (root)
  //     return root;
  //   instance = Create new root;
  //   instance_.store(instance, std::memory_order_release);
  //   return instance;
  // }
  //
  // However, we don't want to use a base::Lock here, so instead we use
  // compare-and-exchange on a lock variable, which provides the same
  // guarantees.
  SimpleScopedSpinLocker scoped_lock{initialization_lock_};

  T* instance = instance_.load(std::memory_order_relaxed);
  // Someone beat us to it.
  if (instance) {
    return instance;
  }

  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
  instance_.store(instance, std::memory_order_release);

  return instance;
}

class MainPartitionConstructor {
 public:
  static partition_alloc::PartitionRoot* New(void* buffer) {
    constexpr partition_alloc::PartitionOptions::EnableToggle thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Additional partitions may be created in ConfigurePartitions(). Since
        // only one partition can have the thread cache enabled, postpone the
        // decision to turn the thread cache on until after that call.
        // TODO(bartekn): Enable it here by default, once the "split-only" mode
        // is no longer needed.
        partition_alloc::PartitionOptions::kDisabled;
#else   // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Other tests, such as the ThreadCache tests, create a thread cache,
        // and only one is supported at a time.
        partition_alloc::PartitionOptions::kDisabled;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    partition_alloc::PartitionOptions opts;
    opts.aligned_alloc = partition_alloc::PartitionOptions::kAllowed;
    opts.thread_cache = thread_cache;
    opts.star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed;
    opts.backup_ref_ptr = partition_alloc::PartitionOptions::kDisabled;
    auto* new_root = new (buffer) partition_alloc::PartitionRoot(opts);

    return new_root;
  }
};

LeakySingleton<partition_alloc::PartitionRoot, MainPartitionConstructor> g_root
    PA_CONSTINIT = {};
partition_alloc::PartitionRoot* Allocator() {
  return g_root.Get();
}

// Original g_root_ if it was replaced by ConfigurePartitions().
std::atomic<partition_alloc::PartitionRoot*> g_original_root(nullptr);

std::atomic<bool> g_roots_finalized = false;

class AlignedPartitionConstructor {
 public:
  static partition_alloc::PartitionRoot* New(void* buffer) {
    return g_root.Get();
  }
};

LeakySingleton<partition_alloc::PartitionRoot, AlignedPartitionConstructor>
    g_aligned_root PA_CONSTINIT = {};

partition_alloc::PartitionRoot* OriginalAllocator() {
  return g_original_root.load(std::memory_order_relaxed);
}

partition_alloc::PartitionRoot* AlignedAllocator() {
  return g_aligned_root.Get();
}

bool AllocatorConfigurationFinalized() {
  return g_roots_finalized.load();
}

void* AllocateAlignedMemory(size_t alignment, size_t size) {
  // Memory returned by the regular allocator *always* respects |kAlignment|,
  // which is a power of two, and any valid alignment is also a power of two. So
  // we can directly fulfill these requests with the main allocator.
  //
  // This has several advantages:
  // - The thread cache is supported on the main partition
  // - Reduced fragmentation
  // - Better coverage for MiraclePtr variants requiring extras
  //
  // There are several call sites in Chromium where base::AlignedAlloc is called
  // with a small alignment. Some may be due to overly-careful code, some are
  // because the client code doesn't know the required alignment at compile
  // time.
  //
  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
  // instance) directly call PartitionFree(), so there is no risk of
  // mismatch (see the default_dispatch definition below).
  if (alignment <= partition_alloc::internal::kAlignment) {
    // This is mandated by |posix_memalign()| and friends, so should never fire.
    PA_CHECK(std::has_single_bit(alignment));
    // TODO(bartekn): See if the compiler optimizes branches down the stack on
    // Mac, where PartitionPageSize() isn't constexpr.
    return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(
        size);
  }

  return AlignedAllocator()
      ->AlignedAllocInline<partition_alloc::AllocFlags::kNoHooks>(alignment,
                                                                  size);
}
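
// Illustrative example (a sketch, not exercised by this file): with the usual
// 16-byte |kAlignment|, posix_memalign(&p, 8, 100) takes the main-allocator
// fast path above, while posix_memalign(&p, 64, 100) is served by
// AlignedAllocator(). The exact |kAlignment| value is platform-dependent.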

}  // namespace

namespace allocator_shim::internal {

void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
}

void* PartitionMallocUnchecked(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return Allocator()
      ->AllocInline<partition_alloc::AllocFlags::kReturnNull |
                    partition_alloc::AllocFlags::kNoHooks>(size);
}

void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  const size_t total =
      partition_alloc::internal::base::CheckMul(n, size).ValueOrDie();
  return Allocator()
      ->AllocInline<partition_alloc::AllocFlags::kZeroFill |
                    partition_alloc::AllocFlags::kNoHooks>(total);
}

void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
                            size_t size,
                            size_t alignment,
                            void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

// aligned_realloc documentation is
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always frees the original memory block and allocates a new
// memory block.
// TODO(tasak): Implement PartitionRoot::AlignedRealloc and use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  void* new_ptr = nullptr;
  if (size > 0) {
    new_ptr = AllocateAlignedMemory(alignment, size);
  } else {
    // size == 0 and address != null means just "free(address)".
    if (address) {
      partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
          partition_alloc::FreeFlags::kNoHooks>(address);
    }
  }
  // The original memory block (specified by address) is unchanged if ENOMEM.
  if (!new_ptr) {
    return nullptr;
  }
  // TODO(tasak): Need to compare the new alignment with the address' alignment.
  // If the two alignments are not the same, need to return nullptr with EINVAL.
  if (address) {
    size_t usage = partition_alloc::PartitionRoot::GetUsableSize(address);
    size_t copy_size = usage > size ? size : usage;
    memcpy(new_ptr, address, copy_size);

    partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
        partition_alloc::FreeFlags::kNoHooks>(address);
  }
  return new_ptr;
}
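
// Sketch of the resulting semantics, assuming the Microsoft _aligned_realloc
// contract linked above: _aligned_realloc(p, 200, 32) allocates a fresh
// 32-byte-aligned block of 200 bytes, copies min(GetUsableSize(p), 200) bytes
// from |p|, then frees |p|; size == 0 degenerates to a plain free of |p|.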

void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(address)) &&
                  address)) {
    // A memory region allocated by the system allocator is passed to this
    // function.  Forward the request to `realloc` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return realloc(address, size);
  }
#endif  // BUILDFLAG(IS_APPLE)

  return Allocator()->Realloc<partition_alloc::AllocFlags::kNoHooks>(address,
                                                                     size, "");
}

#if BUILDFLAG(PA_IS_CAST_ANDROID)
extern "C" {
void __real_free(void*);
}       // extern "C"
#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)

void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  // TODO(bartekn): Add MTE unmasking here (and below).
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(object)) &&
                  object)) {
    // A memory region allocated by the system allocator is passed to this
    // function.  Forward the request to `free` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return free(object);
  }
#endif  // BUILDFLAG(IS_APPLE)

  // On Android Chromecast devices, there is at least one case where a system
  // malloc() pointer can be passed to PartitionAlloc's free(). If we don't own
  // the pointer, pass it along. This should not have a runtime cost vs. regular
  // Android, since on Android we have a PA_CHECK() rather than the branch here.
#if BUILDFLAG(PA_IS_CAST_ANDROID)
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(object)) &&
                  object)) {
    // A memory region allocated by the system allocator is passed to this
    // function.  Forward the request to `free()`, which is `__real_free()`
    // here.
    return __real_free(object);
  }
#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)

  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
      partition_alloc::FreeFlags::kNoHooks>(object);
}

#if BUILDFLAG(IS_APPLE)
// Normal free() path on Apple OSes:
// 1. size = GetSizeEstimate(ptr);
// 2. if (size) FreeDefiniteSize(ptr, size)
//
// So we don't need to re-check that the pointer is owned in Free(), and we
// can use the size.
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
                               void* address,
                               size_t size,
                               void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
  // still useful though, as we avoid double-checking that the address is owned.
  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
      partition_alloc::FreeFlags::kNoHooks>(address);
}
#endif  // BUILDFLAG(IS_APPLE)

size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  // This is used to implement malloc_usable_size(3). Per its man page, "if ptr
  // is NULL, 0 is returned".
  if (!address) {
    return 0;
  }

#if BUILDFLAG(IS_APPLE)
  if (!partition_alloc::IsManagedByPartitionAlloc(
          reinterpret_cast<uintptr_t>(address))) {
    // The object pointed to by `address` is not allocated by PartitionAlloc.
    // The return value `0` means that the pointer does not belong to this
    // malloc zone.
    return 0;
  }
#endif  // BUILDFLAG(IS_APPLE)

  // TODO(lizeb): Returns incorrect values for aligned allocations.
  const size_t size =
      partition_alloc::PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(
          address);
#if BUILDFLAG(IS_APPLE)
  // The object pointed to by `address` is allocated by PartitionAlloc, so this
  // function must not return zero, so that the malloc zone dispatcher finds
  // the appropriate malloc zone.
  PA_DCHECK(size);
#endif  // BUILDFLAG(IS_APPLE)
  return size;
}
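
// For example, malloc_usable_size(malloc(100)) is expected to report the full
// usable capacity of the slot the allocation landed in, which may be somewhat
// larger than 100 (the exact value depends on the bucket layout).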

#if BUILDFLAG(IS_APPLE)
size_t PartitionGoodSize(const AllocatorDispatch*, size_t size, void* context) {
  return Allocator()->AllocationCapacityFromRequestedSize(size);
}

bool PartitionClaimedAddress(const AllocatorDispatch*,
                             void* address,
                             void* context) {
  return partition_alloc::IsManagedByPartitionAlloc(
      reinterpret_cast<uintptr_t>(address));
}
#endif  // BUILDFLAG(IS_APPLE)

unsigned PartitionBatchMalloc(const AllocatorDispatch*,
                              size_t size,
                              void** results,
                              unsigned num_requested,
                              void* context) {
  // No real batching: we could, for instance, only acquire the lock once.
  // Keep it simple for now.
  for (unsigned i = 0; i < num_requested; i++) {
    // No need to check the results, we crash if it fails.
    results[i] = PartitionMalloc(nullptr, size, nullptr);
  }

  // Either all succeeded, or we crashed.
  return num_requested;
}

void PartitionBatchFree(const AllocatorDispatch*,
                        void** to_be_freed,
                        unsigned num_to_be_freed,
                        void* context) {
  // No real batching: we could, for instance, only acquire the lock once.
  // Keep it simple for now.
  for (unsigned i = 0; i < num_to_be_freed; i++) {
    PartitionFree(nullptr, to_be_freed[i], nullptr);
  }
}

#if BUILDFLAG(IS_APPLE)
void PartitionTryFreeDefault(const AllocatorDispatch*,
                             void* address,
                             void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};

  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
          reinterpret_cast<uintptr_t>(address)))) {
    // The object pointed to by `address` is not allocated by PartitionAlloc.
    // Call find_zone_and_free.
    return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
  }

  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
      partition_alloc::FreeFlags::kNoHooks>(address);
}
#endif  // BUILDFLAG(IS_APPLE)

// static
bool PartitionAllocMalloc::AllocatorConfigurationFinalized() {
  return ::AllocatorConfigurationFinalized();
}

// static
partition_alloc::PartitionRoot* PartitionAllocMalloc::Allocator() {
  return ::Allocator();
}

// static
partition_alloc::PartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
  return ::OriginalAllocator();
}

// static
partition_alloc::PartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
  return ::AlignedAllocator();
}

}  // namespace allocator_shim::internal

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

namespace allocator_shim {

void EnablePartitionAllocMemoryReclaimer() {
  // Unlike other partitions, Allocator() does not register its PartitionRoot
  // with the memory reclaimer, because doing so may allocate memory. Thus, the
  // registration with the memory reclaimer has to be done some time later,
  // when the main root is fully configured.
  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
      Allocator());

  // There is only one PartitionAlloc-Everywhere partition at the moment. Any
  // additional partitions will be created in ConfigurePartitions() and
  // registered for memory reclaiming there.
  PA_DCHECK(!AllocatorConfigurationFinalized());
  PA_DCHECK(OriginalAllocator() == nullptr);
  PA_DCHECK(AlignedAllocator() == Allocator());
}

void ConfigurePartitions(
    EnableBrp enable_brp,
    EnableMemoryTagging enable_memory_tagging,
    partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode,
    SplitMainPartition split_main_partition,
    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
    size_t ref_count_size,
    BucketDistribution distribution,
    size_t scheduler_loop_quarantine_capacity_in_bytes,
    ZappingByFreeFlags zapping_by_free_flags) {
  // BRP cannot be enabled without splitting the main partition. Furthermore, in
  // the "before allocation" mode, it can't be enabled without further splitting
  // out the aligned partition.
  PA_CHECK(!enable_brp || split_main_partition);
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
#endif
  // Can't split out the aligned partition without splitting the main one.
  PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);

  // Calling Get() is important even if the return values aren't used, because
  // it has the side effect of initializing the variables if they weren't
  // already.
  auto* current_root = g_root.Get();
  auto* current_aligned_root = g_aligned_root.Get();
  PA_DCHECK(current_root == current_aligned_root);

  if (!split_main_partition) {
    switch (distribution) {
      case BucketDistribution::kNeutral:
        // We start in the 'default' case.
        break;
      case BucketDistribution::kDenser:
        current_root->SwitchToDenserBucketDistribution();
        break;
    }
    PA_DCHECK(!enable_brp);
    PA_DCHECK(!use_dedicated_aligned_partition);
    PA_DCHECK(!current_root->settings.with_thread_cache);
    PA_CHECK(!g_roots_finalized.exchange(true));  // Ensure configured once.
    return;
  }

  // We've been bitten before by using a static local when initializing a
  // partition. For synchronization, static local variables call into the
  // runtime on Windows, which may not be ready to handle it if this path is
  // invoked on an allocation during runtime initialization.
  // ConfigurePartitions() is invoked explicitly from Chromium code, so this
  // shouldn't bite us here. Mentioning just in case we move this code earlier.
  static partition_alloc::internal::base::NoDestructor<
      partition_alloc::PartitionAllocator>
      new_main_allocator([&]() {
        partition_alloc::PartitionOptions opts;
        opts.aligned_alloc =
            !use_dedicated_aligned_partition
                ? partition_alloc::PartitionOptions::kAllowed
                : partition_alloc::PartitionOptions::kDisallowed;
        opts.thread_cache = partition_alloc::PartitionOptions::kDisabled;
        opts.star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed;
        opts.backup_ref_ptr =
            enable_brp ? partition_alloc::PartitionOptions::kEnabled
                       : partition_alloc::PartitionOptions::kDisabled;
        opts.ref_count_size = ref_count_size;
        opts.zapping_by_free_flags =
            zapping_by_free_flags
                ? partition_alloc::PartitionOptions::kEnabled
                : partition_alloc::PartitionOptions::kDisabled;
        opts.scheduler_loop_quarantine_capacity_in_bytes =
            scheduler_loop_quarantine_capacity_in_bytes;
        opts.memory_tagging = {
            .enabled = enable_memory_tagging
                           ? partition_alloc::PartitionOptions::kEnabled
                           : partition_alloc::PartitionOptions::kDisabled,
            .reporting_mode = memory_tagging_reporting_mode};
        return opts;
      }());
  partition_alloc::PartitionRoot* new_root = new_main_allocator->root();

  partition_alloc::PartitionRoot* new_aligned_root;
  if (use_dedicated_aligned_partition) {
    // TODO(bartekn): Use the original root instead of creating a new one. It'd
    // result in one fewer partition, but come at the cost of commingling types.
    static partition_alloc::internal::base::NoDestructor<
        partition_alloc::PartitionAllocator>
        new_aligned_allocator([&]() {
          partition_alloc::PartitionOptions opts;
          opts.aligned_alloc = partition_alloc::PartitionOptions::kAllowed;
          opts.thread_cache = partition_alloc::PartitionOptions::kDisabled;
          opts.star_scan_quarantine =
              partition_alloc::PartitionOptions::kAllowed;
          opts.backup_ref_ptr = partition_alloc::PartitionOptions::kDisabled;
          return opts;
        }());
    new_aligned_root = new_aligned_allocator->root();
  } else {
    // The new main root can also support AlignedAlloc.
    new_aligned_root = new_root;
  }

  // Now switch traffic to the new partitions.
  g_original_root = current_root;
  g_aligned_root.Replace(new_aligned_root);
  g_root.Replace(new_root);

  // No need for g_original_aligned_root, because in cases where g_aligned_root
  // is replaced, it must've been g_original_root.
  PA_CHECK(current_aligned_root == g_original_root);

  // Purge memory, now that the traffic to the original partition is cut off.
  current_root->PurgeMemory(
      partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
      partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);

  switch (distribution) {
    case BucketDistribution::kNeutral:
      // We start in the 'default' case.
      break;
    case BucketDistribution::kDenser:
      new_root->SwitchToDenserBucketDistribution();
      if (new_aligned_root != new_root) {
        new_aligned_root->SwitchToDenserBucketDistribution();
      }
      break;
  }

  PA_CHECK(!g_roots_finalized.exchange(true));  // Ensure configured once.
}

// TODO(crbug.com/1137393): Remove this function once pdfium has switched to
// the new version.
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void ConfigurePartitions(
    EnableBrp enable_brp,
    EnableMemoryTagging enable_memory_tagging,
    SplitMainPartition split_main_partition,
    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
    size_t ref_count_size,
    BucketDistribution distribution) {
  // Since the only user of this function is a test function, we use synchronous
  // testing mode.
  const partition_alloc::TagViolationReportingMode
      memory_tagging_reporting_mode =
          enable_memory_tagging
              ? partition_alloc::TagViolationReportingMode::kSynchronous
              : partition_alloc::TagViolationReportingMode::kDisabled;

  // We don't use these features in PDFium.
  size_t scheduler_loop_quarantine_capacity_in_bytes = 0;
  auto zapping_by_free_flags = ZappingByFreeFlags(false);

  ConfigurePartitions(enable_brp, enable_memory_tagging,
                      memory_tagging_reporting_mode, split_main_partition,
                      use_dedicated_aligned_partition, ref_count_size,
                      distribution, scheduler_loop_quarantine_capacity_in_bytes,
                      zapping_by_free_flags);
}

// No synchronization provided: `PartitionRoot.flags` is only written
// to in `PartitionRoot::Init()`.
uint32_t GetMainPartitionRootExtrasSize() {
#if PA_CONFIG(EXTRAS_REQUIRED)
  return g_root.Get()->settings.extras_size;
#else
  return 0;
#endif  // PA_CONFIG(EXTRAS_REQUIRED)
}

#if BUILDFLAG(USE_STARSCAN)
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
  partition_alloc::internal::PCScan::Initialize(config);

  PA_CHECK(AllocatorConfigurationFinalized());
  partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
  if (OriginalAllocator() != nullptr) {
    partition_alloc::internal::PCScan::RegisterScannableRoot(
        OriginalAllocator());
  }
  if (Allocator() != AlignedAllocator()) {
    partition_alloc::internal::PCScan::RegisterScannableRoot(
        AlignedAllocator());
  }

  allocator_shim::NonScannableAllocator::Instance().NotifyPCScanEnabled();
  allocator_shim::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
}
#endif  // BUILDFLAG(USE_STARSCAN)
}  // namespace allocator_shim

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &allocator_shim::internal::PartitionMalloc,  // alloc_function
    &allocator_shim::internal::
        PartitionMallocUnchecked,  // alloc_unchecked_function
    &allocator_shim::internal::
        PartitionCalloc,  // alloc_zero_initialized_function
    &allocator_shim::internal::PartitionMemalign,  // alloc_aligned_function
    &allocator_shim::internal::PartitionRealloc,   // realloc_function
    &allocator_shim::internal::PartitionFree,      // free_function
    &allocator_shim::internal::
        PartitionGetSizeEstimate,  // get_size_estimate_function
#if BUILDFLAG(IS_APPLE)
    &allocator_shim::internal::PartitionGoodSize,        // good_size
    &allocator_shim::internal::PartitionClaimedAddress,  // claimed_address
#else
    nullptr,  // good_size
    nullptr,  // claimed_address
#endif
    &allocator_shim::internal::PartitionBatchMalloc,  // batch_malloc_function
    &allocator_shim::internal::PartitionBatchFree,    // batch_free_function
#if BUILDFLAG(IS_APPLE)
    // On Apple OSes, free_definite_size() is always called from free(), since
    // get_size_estimate() is used to determine whether an allocation belongs to
    // the current zone. It makes sense to optimize for it.
    &allocator_shim::internal::PartitionFreeDefiniteSize,
    // On Apple OSes, try_free_default() is sometimes called as an optimization
    // of free().
    &allocator_shim::internal::PartitionTryFreeDefault,
#else
    nullptr,  // free_definite_size_function
    nullptr,  // try_free_default_function
#endif
    &allocator_shim::internal::
        PartitionAlignedAlloc,  // aligned_malloc_function
    &allocator_shim::internal::
        PartitionAlignedRealloc,               // aligned_realloc_function
    &allocator_shim::internal::PartitionFree,  // aligned_free_function
    nullptr,                                   // next
};

// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.

extern "C" {

#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}

#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  partition_alloc::SimplePartitionStatsDumper allocator_dumper;
  Allocator()->DumpStats("malloc", true, &allocator_dumper);
  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.

  partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
  if (AlignedAllocator() != Allocator()) {
    AlignedAllocator()->DumpStats("posix_memalign", true,
                                  &aligned_allocator_dumper);
  }

  // Dump stats for nonscannable and nonquarantinable allocators.
  auto& nonscannable_allocator =
      allocator_shim::NonScannableAllocator::Instance();
  partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
  if (auto* nonscannable_root = nonscannable_allocator.root()) {
    nonscannable_root->DumpStats("malloc", true,
                                 &nonscannable_allocator_dumper);
  }
  auto& nonquarantinable_allocator =
      allocator_shim::NonQuarantinableAllocator::Instance();
  partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
  if (auto* nonquarantinable_root = nonquarantinable_allocator.root()) {
    nonquarantinable_root->DumpStats("malloc", true,
                                     &nonquarantinable_allocator_dumper);
  }

  struct mallinfo info = {};
  info.arena = 0;  // Memory *not* allocated with mmap().

  // Memory allocated with mmap(), aka virtual size.
  info.hblks =
      partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
          allocator_dumper.stats().total_mmapped_bytes +
          aligned_allocator_dumper.stats().total_mmapped_bytes +
          nonscannable_allocator_dumper.stats().total_mmapped_bytes +
          nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
  // Resident bytes.
  info.hblkhd =
      partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
          allocator_dumper.stats().total_resident_bytes +
          aligned_allocator_dumper.stats().total_resident_bytes +
          nonscannable_allocator_dumper.stats().total_resident_bytes +
          nonquarantinable_allocator_dumper.stats().total_resident_bytes);
  // Allocated bytes.
  info.uordblks =
      partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
          allocator_dumper.stats().total_active_bytes +
          aligned_allocator_dumper.stats().total_active_bytes +
          nonscannable_allocator_dumper.stats().total_active_bytes +
          nonquarantinable_allocator_dumper.stats().total_active_bytes);

  return info;
}
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)

}  // extern "C"

#if BUILDFLAG(IS_APPLE)

namespace allocator_shim {

void InitializeDefaultAllocatorPartitionRoot() {
  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
  // internally, e.g. __builtin_available, and it's not easy to avoid them.
  // Thus, we initialize the PartitionRoot using the system default allocator
  // before we intercept the system default allocator.
  std::ignore = Allocator();
}

}  // namespace allocator_shim

#endif  // BUILDFLAG(IS_APPLE)

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)