• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
6 #error This header is meant to be included only once by allocator_shim.cc
7 #endif
8 
9 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
10 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
11 
12 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
13 
14 #if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
15 #error This header must be included iff PartitionAlloc-Everywhere is enabled.
16 #endif
17 
18 #include <string.h>
19 
20 #include <atomic>
21 #include <tuple>
22 
23 #include "base/allocator/early_zone_registration_mac.h"
24 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
25 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
26 #include "base/logging.h"
27 
namespace partition_alloc {

// Defined in base/allocator/partition_allocator/partition_root.cc
//
// fork(2) hooks: the first is called in the parent before forking to acquire
// the allocator lock; the latter two run after the fork in the parent and the
// child respectively (see the MallocIntrospection{ForceLock,ForceUnlock,
// ReinitLock} callbacks below, which forward to them).
void PartitionAllocMallocHookOnBeforeForkInParent();
void PartitionAllocMallocHookOnAfterForkInParent();
void PartitionAllocMallocHookOnAfterForkInChild();

}  // namespace partition_alloc
36 
37 namespace allocator_shim {
38 
39 namespace {
40 
41 // malloc_introspection_t's callback functions for our own zone
42 
// Enumerates the memory regions owned by this zone on behalf of an
// introspecting process.
kern_return_t MallocIntrospectionEnumerator(task_t task,
                                            void*,
                                            unsigned type_mask,
                                            vm_address_t zone_address,
                                            memory_reader_t reader,
                                            vm_range_recorder_t recorder) {
  // Should enumerate all memory regions allocated by this allocator, but not
  // implemented just because of no use case for now.
  return KERN_FAILURE;
}
53 
MallocIntrospectionGoodSize(malloc_zone_t * zone,size_t size)54 size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
55   return partition_alloc::internal::base::bits::AlignUp(
56       size, partition_alloc::internal::kAlignment);
57 }
58 
// Consistency check of the zone's internal state.
boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
  // Should check the consistency of the allocator implementing this malloc
  // zone, but not implemented just because of no use case for now.  Always
  // reports "consistent".
  return true;
}
64 
// Prints a human-readable description of the zone.
void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
  // Should print the current states of the zone for debugging / investigation
  // purpose, but not implemented just because of no use case for now.
}
69 
// Toggles malloc activity logging for `address`.
void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
  // Should enable logging of the activities on the given `address`, but not
  // implemented just because of no use case for now.
}
74 
// pthread_atfork-style pre-fork hook for this zone.
void MallocIntrospectionForceLock(malloc_zone_t* zone) {
  // Called before fork(2) to acquire the lock.
  partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
}
79 
// Post-fork hook for the parent process.
void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
  // Called in the parent process after fork(2) to release the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
}
84 
MallocIntrospectionStatistics(malloc_zone_t * zone,malloc_statistics_t * stats)85 void MallocIntrospectionStatistics(malloc_zone_t* zone,
86                                    malloc_statistics_t* stats) {
87   // Should report the memory usage correctly, but not implemented just because
88   // of no use case for now.
89   stats->blocks_in_use = 0;
90   stats->size_in_use = 0;
91   stats->max_size_in_use = 0;  // High water mark of touched memory
92   stats->size_allocated = 0;   // Reserved in memory
93 }
94 
// Reports whether the zone's lock is currently held.
boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
  // Should return true if the underlying PartitionRoot is locked, but not
  // implemented just because this function seems not used effectively.
  return false;
}
100 
// Discharge-checking introspection hook (zone version >= 7).
boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
  return false;
}
105 
// Discharge-checking introspection hook (zone version >= 7).
void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
}
109 
// Discharge introspection hook (zone version >= 7).
void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
  // 'discharge' is not supported.
}
113 
// Discharge introspection hook (zone version >= 7); takes a block callback.
void MallocIntrospectionEnumerateDischargedPointers(
    malloc_zone_t* zone,
    void (^report_discharged)(void* memory, void* info)) {
  // 'discharge' is not supported.
}
119 
// Post-fork hook for the child process (zone version >= 9).
void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
  // Called in a child process after fork(2) to re-initialize the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
}
124 
// Prints another process's zone state (zone version >= 11).
void MallocIntrospectionPrintTask(task_t task,
                                  unsigned level,
                                  vm_address_t zone_address,
                                  memory_reader_t reader,
                                  print_task_printer_t printer) {
  // Should print the current states of another process's zone for debugging /
  // investigation purpose, but not implemented just because of no use case
  // for now.
}
134 
MallocIntrospectionTaskStatistics(task_t task,vm_address_t zone_address,memory_reader_t reader,malloc_statistics_t * stats)135 void MallocIntrospectionTaskStatistics(task_t task,
136                                        vm_address_t zone_address,
137                                        memory_reader_t reader,
138                                        malloc_statistics_t* stats) {
139   // Should report the memory usage in another process's zone, but not
140   // implemented just because of no use case for now.
141   stats->blocks_in_use = 0;
142   stats->size_in_use = 0;
143   stats->max_size_in_use = 0;  // High water mark of touched memory
144   stats->size_allocated = 0;   // Reserved in memory
145 }
146 
147 // malloc_zone_t's callback functions for our own zone
148 
// size() entry point of this zone; forwards to the allocator shim.  Also
// used by libmalloc to test whether `ptr` belongs to this zone (non-zero
// means "mine").
size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
  return ShimGetSizeEstimate(ptr, nullptr);
}
152 
// malloc() entry point of this zone; forwards to the allocator shim.
void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
  return ShimMalloc(size, nullptr);
}
156 
// calloc() entry point of this zone; forwards to the allocator shim.
void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}
160 
// valloc() entry point of this zone; forwards to the allocator shim.
void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
  return ShimValloc(size, nullptr);
}
164 
MallocZoneFree(malloc_zone_t * zone,void * ptr)165 void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
166   return ShimFree(ptr, nullptr);
167 }
168 
// realloc() entry point of this zone; forwards to the allocator shim.
void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}
172 
// malloc_destroy_zone() entry point.
void MallocZoneDestroy(malloc_zone_t* zone) {
  // No support to destroy the zone for now.
}
176 
// memalign entry point (zone version >= 5); forwards to the allocator shim.
void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
  return ShimMemalign(alignment, size, nullptr);
}
180 
MallocZoneFreeDefiniteSize(malloc_zone_t * zone,void * ptr,size_t size)181 void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
182   return ShimFreeDefiniteSize(ptr, size, nullptr);
183 }
184 
// batch_malloc entry point: allocates up to `num_requested` blocks of `size`
// bytes each into `results`; forwards to the allocator shim.
unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
                               size_t size,
                               void** results,
                               unsigned num_requested) {
  return ShimBatchMalloc(size, results, num_requested, nullptr);
}
191 
MallocZoneBatchFree(malloc_zone_t * zone,void ** to_be_freed,unsigned num)192 void MallocZoneBatchFree(malloc_zone_t* zone,
193                          void** to_be_freed,
194                          unsigned num) {
195   return ShimBatchFree(to_be_freed, num, nullptr);
196 }
197 
// claimed_address entry point (zone version >= 10): reports whether `ptr`
// belongs to this zone; forwards to the allocator shim.
boolean_t MallocZoneClaimedAddress(malloc_zone_t* zone, void* ptr) {
  return static_cast<boolean_t>(ShimClaimedAddress(ptr, nullptr));
}
201 
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
// try_free_default entry point (zone version >= 13); forwards to the
// allocator shim.
void MallocZoneTryFreeDefault(malloc_zone_t* zone, void* ptr) {
  ShimTryFreeDefault(ptr, nullptr);
}
#endif
207 
// This file's zone and its introspection vtable.  Zero-initialized here; the
// function pointers are wired up by InitializeZone() before registration.
malloc_introspection_t g_mac_malloc_introspection{};
malloc_zone_t g_mac_malloc_zone{};
210 
GetDefaultMallocZone()211 malloc_zone_t* GetDefaultMallocZone() {
212   // malloc_default_zone() does not return... the default zone, but the initial
213   // one. The default one is the first element of the default zone array.
214   unsigned int zone_count = 0;
215   vm_address_t* zones = nullptr;
216   kern_return_t result =
217       malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
218   MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
219   return reinterpret_cast<malloc_zone_t*>(zones[0]);
220 }
221 
// Returns true iff a PartitionAlloc zone (identified by its zone name) is
// already registered in this process, e.g. by another copy of this library.
bool IsAlreadyRegistered() {
  // HACK: This should really only be called once, but it is not.
  //
  // This function is a static constructor of its binary. If it is included in a
  // dynamic library, then the same process may end up executing this code
  // multiple times, once per library. As a consequence, each new library will
  // add its own allocator as the default zone. Aside from splitting the heap
  // further, the main issue arises if/when the last library to be loaded
  // (dlopen()-ed) gets dlclose()-ed.
  //
  // See crbug.com/1271139 for details.
  //
  // In this case, subsequent free() will be routed by libmalloc to the deleted
  // zone (since its code has been unloaded from memory), and crash inside
  // libsystem's free(). This in practice happens as soon as dlclose() is
  // called, inside the dynamic linker (dyld).
  //
  // Since we are talking about different library, and issues inside the dynamic
  // linker, we cannot use a global static variable (which would be
  // per-library), or anything from pthread.
  //
  // The solution used here is to check whether the current default zone is
  // already ours, in which case we are not the first dynamic library here, and
  // should do nothing. This is racy, and hacky.
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  // *Not* using malloc_default_zone(), as it seems to be hardcoded to return
  // something else than the default zone. See the difference between
  // malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
  // (in libmalloc).
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  // Checking all the zones, in case someone registered their own zone on top of
  // our own.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);

    // strcmp() and not a pointer comparison, as the zone was registered from
    // another library, the pointers don't match.
    if (zone->zone_name &&
        (strcmp(zone->zone_name, partition_alloc::kPartitionAllocZoneName) ==
         0)) {
      // This zone is provided by PartitionAlloc, so this function has been
      // called from another library (or the main executable), nothing to do.
      //
      // This should be a crash, ideally, but callers do it, so only warn, for
      // now.
      RAW_LOG(ERROR,
              "Trying to load the allocator multiple times. This is *not* "
              "supported.");
      return true;
    }
  }

  return false;
}
279 
// Populates g_mac_malloc_introspection and g_mac_malloc_zone with the
// callbacks defined above.  Must run before the zone is registered with
// libmalloc.
void InitializeZone() {
  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
  g_mac_malloc_introspection.log = MallocIntrospectionLog;
  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
  g_mac_malloc_introspection.enable_discharge_checking =
      MallocIntrospectionEnableDischargeChecking;
  g_mac_malloc_introspection.disable_discharge_checking =
      MallocIntrospectionDisableDischargeChecking;
  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
  g_mac_malloc_introspection.enumerate_discharged_pointers =
      MallocIntrospectionEnumerateDischargedPointers;
  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
  g_mac_malloc_introspection.task_statistics =
      MallocIntrospectionTaskStatistics;
  // `version` member indicates which APIs are supported in this zone.
  //   version >= 5: memalign is supported
  //   version >= 6: free_definite_size is supported
  //   version >= 7: introspect's discharge family is supported
  //   version >= 8: pressure_relief is supported
  //   version >= 9: introspect.reinit_lock is supported
  //   version >= 10: claimed_address is supported
  //   version >= 11: introspect.print_task is supported
  //   version >= 12: introspect.task_statistics is supported
  //   version >= 13: try_free_default is supported
  g_mac_malloc_zone.version = partition_alloc::kZoneVersion;
  g_mac_malloc_zone.zone_name = partition_alloc::kPartitionAllocZoneName;
  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
  g_mac_malloc_zone.size = MallocZoneSize;
  g_mac_malloc_zone.malloc = MallocZoneMalloc;
  g_mac_malloc_zone.calloc = MallocZoneCalloc;
  g_mac_malloc_zone.valloc = MallocZoneValloc;
  g_mac_malloc_zone.free = MallocZoneFree;
  g_mac_malloc_zone.realloc = MallocZoneRealloc;
  g_mac_malloc_zone.destroy = MallocZoneDestroy;
  g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
  g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
  g_mac_malloc_zone.memalign = MallocZoneMemalign;
  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
  // pressure_relief (version >= 8) is deliberately left unset.
  g_mac_malloc_zone.pressure_relief = nullptr;
  g_mac_malloc_zone.claimed_address = MallocZoneClaimedAddress;
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
  g_mac_malloc_zone.try_free_default = MallocZoneTryFreeDefault;
#endif
}
331 
namespace {
// Set to true (release ordering) once
// InitializeDefaultMallocZoneWithPartitionAlloc() has finished installing the
// PartitionAlloc zone; read with acquire ordering by
// IsDefaultAllocatorPartitionRootInitialized().
// NOTE: the previous `static` was dropped — it is redundant inside an
// anonymous namespace, which already gives internal linkage.
std::atomic<bool> g_initialization_is_done;
}  // namespace
335 
// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc.  Since we'd like to make as much code as possible to use our
// own memory allocator (and reduce bugs caused by mixed use of the system
// allocator and our own allocator), run the following function
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
//
// Note that, despite the highest priority of the initialization order,
// [NSThread init] runs before InitializeDefaultMallocZoneWithPartitionAlloc
// unfortunately and allocates memory with the system allocator.  Plus, the
// allocated memory will be deallocated with the default zone's `free` at that
// moment without using a zone dispatcher.  Hence, our own `free` function
// receives an address allocated by the system allocator.
__attribute__((constructor(0))) void
InitializeDefaultMallocZoneWithPartitionAlloc() {
  // Another copy of this library may already have installed its zone; see
  // IsAlreadyRegistered() for why this can happen.
  if (IsAlreadyRegistered())
    return;

  // Instantiate the existing regular and purgeable zones in order to make the
  // existing purgeable zone use the existing regular zone since PartitionAlloc
  // doesn't support a purgeable zone.
  std::ignore = malloc_default_zone();
  std::ignore = malloc_default_purgeable_zone();

  // Initialize the default allocator's PartitionRoot with the existing zone.
  InitializeDefaultAllocatorPartitionRoot();

  // Create our own malloc zone.
  InitializeZone();

  malloc_zone_t* system_default_zone = GetDefaultMallocZone();
  if (strcmp(system_default_zone->zone_name,
             partition_alloc::kDelegatingZoneName) == 0) {
    // The first zone is our zone, we can unregister it, replacing it with the
    // new one. This relies on a precise zone setup, done in
    // |EarlyMallocZoneRegistration()|.
    malloc_zone_register(&g_mac_malloc_zone);
    malloc_zone_unregister(system_default_zone);
    g_initialization_is_done.store(true, std::memory_order_release);
    return;
  }

  // Not in the path where the zone was registered early. This is either racy,
  // or fine if the current process is not hosting multiple threads.
  //
  // This path is fine for e.g. most unit tests.
  //
  // Make our own zone the default zone.
  //
  // Put our own zone at the last position, so that it promotes to the default
  // zone.  The implementation logic of malloc_zone_unregister is:
  //   zone_table.swap(unregistered_zone, last_zone);
  //   zone_table.shrink_size_by_1();
  malloc_zone_register(&g_mac_malloc_zone);
  malloc_zone_unregister(system_default_zone);
  // Between malloc_zone_unregister(system_default_zone) (above) and
  // malloc_zone_register(system_default_zone) (below), i.e. while absence of
  // system_default_zone, it's possible that another thread calls free(ptr) and
  // "no zone found" error is hit, crashing the process.
  malloc_zone_register(system_default_zone);

  // Confirm that our own zone is now the default zone.
  CHECK_EQ(GetDefaultMallocZone(), &g_mac_malloc_zone);
  g_initialization_is_done.store(true, std::memory_order_release);
}
400 
401 }  // namespace
402 
// Returns whether InitializeDefaultMallocZoneWithPartitionAlloc() has
// completed in this process.
bool IsDefaultAllocatorPartitionRootInitialized() {
  // Even though zone registration is not thread-safe, let's not make it worse,
  // and use acquire/release ordering.
  return g_initialization_is_done.load(std::memory_order_acquire);
}
408 
409 }  // namespace allocator_shim
410 
411 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
412