1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
6 #error This header is meant to be included only once by allocator_shim.cc
7 #endif
8
9 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
10 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
11
12 #if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
13 #error This header must be included iff PartitionAlloc-Everywhere is enabled.
14 #endif
15
16 #include <string.h>
17
18 #include <atomic>
19 #include <tuple>
20
21 #include "partition_alloc/partition_alloc_base/apple/mach_logging.h"
22 #include "partition_alloc/partition_alloc_base/bits.h"
23 #include "partition_alloc/partition_alloc_base/logging.h"
24 #include "partition_alloc/partition_alloc_buildflags.h"
25 #include "partition_alloc/partition_alloc_check.h"
26 #include "partition_alloc/partition_alloc_constants.h"
27 #include "partition_alloc/shim/early_zone_registration_constants.h"
28
namespace partition_alloc {

// Fork(2) hooks, defined in
// partition_alloc/partition_root.cc
// They are wired into the malloc zone's introspection force_lock /
// force_unlock / reinit_lock callbacks below so that PartitionAlloc's
// internal locks are handled consistently across fork(2).
void PartitionAllocMallocHookOnBeforeForkInParent();
void PartitionAllocMallocHookOnAfterForkInParent();
void PartitionAllocMallocHookOnAfterForkInChild();

}  // namespace partition_alloc
38
39 namespace allocator_shim {
40
41 namespace {
42
43 // malloc_introspection_t's callback functions for our own zone
44
MallocIntrospectionEnumerator(task_t task,void *,unsigned type_mask,vm_address_t zone_address,memory_reader_t reader,vm_range_recorder_t recorder)45 kern_return_t MallocIntrospectionEnumerator(task_t task,
46 void*,
47 unsigned type_mask,
48 vm_address_t zone_address,
49 memory_reader_t reader,
50 vm_range_recorder_t recorder) {
51 // Should enumerate all memory regions allocated by this allocator, but not
52 // implemented just because of no use case for now.
53 return KERN_FAILURE;
54 }
55
MallocIntrospectionGoodSize(malloc_zone_t * zone,size_t size)56 size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
57 return ShimGoodSize(size, nullptr);
58 }
59
MallocIntrospectionCheck(malloc_zone_t * zone)60 boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
61 // Should check the consistency of the allocator implementing this malloc
62 // zone, but not implemented just because of no use case for now.
63 return true;
64 }
65
MallocIntrospectionPrint(malloc_zone_t * zone,boolean_t verbose)66 void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
67 // Should print the current states of the zone for debugging / investigation
68 // purpose, but not implemented just because of no use case for now.
69 }
70
MallocIntrospectionLog(malloc_zone_t * zone,void * address)71 void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
72 // Should enable logging of the activities on the given `address`, but not
73 // implemented just because of no use case for now.
74 }
75
MallocIntrospectionForceLock(malloc_zone_t * zone)76 void MallocIntrospectionForceLock(malloc_zone_t* zone) {
77 // Called before fork(2) to acquire the lock.
78 partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
79 }
80
MallocIntrospectionForceUnlock(malloc_zone_t * zone)81 void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
82 // Called in the parent process after fork(2) to release the lock.
83 partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
84 }
85
MallocIntrospectionStatistics(malloc_zone_t * zone,malloc_statistics_t * stats)86 void MallocIntrospectionStatistics(malloc_zone_t* zone,
87 malloc_statistics_t* stats) {
88 // Should report the memory usage correctly, but not implemented just because
89 // of no use case for now.
90 stats->blocks_in_use = 0;
91 stats->size_in_use = 0;
92 stats->max_size_in_use = 0; // High water mark of touched memory
93 stats->size_allocated = 0; // Reserved in memory
94 }
95
MallocIntrospectionZoneLocked(malloc_zone_t * zone)96 boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
97 // Should return true if the underlying PartitionRoot is locked, but not
98 // implemented just because this function seems not used effectively.
99 return false;
100 }
101
MallocIntrospectionEnableDischargeChecking(malloc_zone_t * zone)102 boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
103 // 'discharge' is not supported.
104 return false;
105 }
106
MallocIntrospectionDisableDischargeChecking(malloc_zone_t * zone)107 void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
108 // 'discharge' is not supported.
109 }
110
MallocIntrospectionDischarge(malloc_zone_t * zone,void * memory)111 void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
112 // 'discharge' is not supported.
113 }
114
115 void MallocIntrospectionEnumerateDischargedPointers(
116 malloc_zone_t* zone,
117 void (^report_discharged)(void* memory, void* info)) {
118 // 'discharge' is not supported.
119 }
120
MallocIntrospectionReinitLock(malloc_zone_t * zone)121 void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
122 // Called in a child process after fork(2) to re-initialize the lock.
123 partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
124 }
125
MallocIntrospectionPrintTask(task_t task,unsigned level,vm_address_t zone_address,memory_reader_t reader,print_task_printer_t printer)126 void MallocIntrospectionPrintTask(task_t task,
127 unsigned level,
128 vm_address_t zone_address,
129 memory_reader_t reader,
130 print_task_printer_t printer) {
131 // Should print the current states of another process's zone for debugging /
132 // investigation purpose, but not implemented just because of no use case
133 // for now.
134 }
135
MallocIntrospectionTaskStatistics(task_t task,vm_address_t zone_address,memory_reader_t reader,malloc_statistics_t * stats)136 void MallocIntrospectionTaskStatistics(task_t task,
137 vm_address_t zone_address,
138 memory_reader_t reader,
139 malloc_statistics_t* stats) {
140 // Should report the memory usage in another process's zone, but not
141 // implemented just because of no use case for now.
142 stats->blocks_in_use = 0;
143 stats->size_in_use = 0;
144 stats->max_size_in_use = 0; // High water mark of touched memory
145 stats->size_allocated = 0; // Reserved in memory
146 }
147
148 // malloc_zone_t's callback functions for our own zone
149
MallocZoneSize(malloc_zone_t * zone,const void * ptr)150 size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
151 return ShimGetSizeEstimate(ptr, nullptr);
152 }
153
MallocZoneMalloc(malloc_zone_t * zone,size_t size)154 void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
155 return ShimMalloc(size, nullptr);
156 }
157
MallocZoneCalloc(malloc_zone_t * zone,size_t n,size_t size)158 void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
159 return ShimCalloc(n, size, nullptr);
160 }
161
MallocZoneValloc(malloc_zone_t * zone,size_t size)162 void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
163 return ShimValloc(size, nullptr);
164 }
165
MallocZoneFree(malloc_zone_t * zone,void * ptr)166 void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
167 return ShimFree(ptr, nullptr);
168 }
169
MallocZoneRealloc(malloc_zone_t * zone,void * ptr,size_t size)170 void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
171 return ShimRealloc(ptr, size, nullptr);
172 }
173
MallocZoneDestroy(malloc_zone_t * zone)174 void MallocZoneDestroy(malloc_zone_t* zone) {
175 // No support to destroy the zone for now.
176 }
177
MallocZoneMemalign(malloc_zone_t * zone,size_t alignment,size_t size)178 void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
179 return ShimMemalign(alignment, size, nullptr);
180 }
181
MallocZoneFreeDefiniteSize(malloc_zone_t * zone,void * ptr,size_t size)182 void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
183 return ShimFreeDefiniteSize(ptr, size, nullptr);
184 }
185
MallocZoneBatchMalloc(malloc_zone_t * zone,size_t size,void ** results,unsigned num_requested)186 unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
187 size_t size,
188 void** results,
189 unsigned num_requested) {
190 return ShimBatchMalloc(size, results, num_requested, nullptr);
191 }
192
MallocZoneBatchFree(malloc_zone_t * zone,void ** to_be_freed,unsigned num)193 void MallocZoneBatchFree(malloc_zone_t* zone,
194 void** to_be_freed,
195 unsigned num) {
196 return ShimBatchFree(to_be_freed, num, nullptr);
197 }
198
MallocZoneClaimedAddress(malloc_zone_t * zone,void * ptr)199 boolean_t MallocZoneClaimedAddress(malloc_zone_t* zone, void* ptr) {
200 return static_cast<boolean_t>(ShimClaimedAddress(ptr, nullptr));
201 }
202
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
void MallocZoneTryFreeDefault(malloc_zone_t* zone, void* address) {
  // Route the try_free_default request through the allocator shim.
  ShimTryFreeDefault(address, nullptr);
}
#endif  // PA_TRY_FREE_DEFAULT_IS_AVAILABLE
208
// Storage for the PartitionAlloc-backed malloc zone and its introspection
// table. Value-initialized here; the fields are filled in by
// InitializeZone() before the zone is registered.
malloc_introspection_t g_mac_malloc_introspection{};
malloc_zone_t g_mac_malloc_zone{};
211
GetDefaultMallocZone()212 malloc_zone_t* GetDefaultMallocZone() {
213 // malloc_default_zone() does not return... the default zone, but the initial
214 // one. The default one is the first element of the default zone array.
215 unsigned int zone_count = 0;
216 vm_address_t* zones = nullptr;
217 kern_return_t result =
218 malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
219 PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
220 return reinterpret_cast<malloc_zone_t*>(zones[0]);
221 }
222
// Returns true iff a zone named kPartitionAllocZoneName is already registered
// with libmalloc, i.e. another copy of this code (in another library, or the
// main executable) already installed PartitionAlloc in this process.
bool IsAlreadyRegistered() {
  // HACK: This should really only be called once, but it is not.
  //
  // This function is a static constructor of its binary. If it is included in a
  // dynamic library, then the same process may end up executing this code
  // multiple times, once per library. As a consequence, each new library will
  // add its own allocator as the default zone. Aside from splitting the heap
  // further, the main issue arises if/when the last library to be loaded
  // (dlopen()-ed) gets dlclose()-ed.
  //
  // See crbug.com/1271139 for details.
  //
  // In this case, subsequent free() will be routed by libmalloc to the deleted
  // zone (since its code has been unloaded from memory), and crash inside
  // libsystem's free(). This in practice happens as soon as dlclose() is
  // called, inside the dynamic linker (dyld).
  //
  // Since we are talking about a different library, and issues inside the
  // dynamic linker, we cannot use a global static variable (which would be
  // per-library), or anything from pthread.
  //
  // The solution used here is to check whether the current default zone is
  // already ours, in which case we are not the first dynamic library here, and
  // should do nothing. This is racy, and hacky.
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  // *Not* using malloc_default_zone(), as it seems to be hardcoded to return
  // something else than the default zone. See the difference between
  // malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
  // (in libmalloc).
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  // Checking all the zones, in case someone registered their own zone on top of
  // our own.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);

    // strcmp() and not a pointer comparison, as the zone was registered from
    // another library, the pointers don't match.
    if (zone->zone_name &&
        (strcmp(zone->zone_name, kPartitionAllocZoneName) == 0)) {
      // This zone is provided by PartitionAlloc, so this function has been
      // called from another library (or the main executable), nothing to do.
      //
      // This should be a crash, ideally, but callers do it, so only warn, for
      // now.
      PA_RAW_LOG(ERROR,
                 "Trying to load the allocator multiple times. This is *not* "
                 "supported.");
      return true;
    }
  }

  return false;
}
279
// Populates g_mac_malloc_introspection and g_mac_malloc_zone with the shim
// callbacks defined above. Called before the zone is registered with
// libmalloc (see InitializeDefaultMallocZoneWithPartitionAlloc()).
void InitializeZone() {
  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
  g_mac_malloc_introspection.log = MallocIntrospectionLog;
  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
  g_mac_malloc_introspection.enable_discharge_checking =
      MallocIntrospectionEnableDischargeChecking;
  g_mac_malloc_introspection.disable_discharge_checking =
      MallocIntrospectionDisableDischargeChecking;
  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
  g_mac_malloc_introspection.enumerate_discharged_pointers =
      MallocIntrospectionEnumerateDischargedPointers;
  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
  g_mac_malloc_introspection.task_statistics =
      MallocIntrospectionTaskStatistics;
  // `version` member indicates which APIs are supported in this zone.
  // version >= 5: memalign is supported
  // version >= 6: free_definite_size is supported
  // version >= 7: introspect's discharge family is supported
  // version >= 8: pressure_relief is supported
  // version >= 9: introspect.reinit_lock is supported
  // version >= 10: claimed_address is supported
  // version >= 11: introspect.print_task is supported
  // version >= 12: introspect.task_statistics is supported
  // version >= 13: try_free_default is supported
  g_mac_malloc_zone.version = kZoneVersion;
  g_mac_malloc_zone.zone_name = kPartitionAllocZoneName;
  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
  g_mac_malloc_zone.size = MallocZoneSize;
  g_mac_malloc_zone.malloc = MallocZoneMalloc;
  g_mac_malloc_zone.calloc = MallocZoneCalloc;
  g_mac_malloc_zone.valloc = MallocZoneValloc;
  g_mac_malloc_zone.free = MallocZoneFree;
  g_mac_malloc_zone.realloc = MallocZoneRealloc;
  g_mac_malloc_zone.destroy = MallocZoneDestroy;
  g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
  g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
  g_mac_malloc_zone.memalign = MallocZoneMemalign;
  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
  // No pressure-relief callback: PartitionAlloc has nothing to do here.
  g_mac_malloc_zone.pressure_relief = nullptr;
  g_mac_malloc_zone.claimed_address = MallocZoneClaimedAddress;
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
  g_mac_malloc_zone.try_free_default = MallocZoneTryFreeDefault;
#endif
}
331
// Set (with release semantics) once zone registration has completed; read
// with acquire semantics by IsDefaultAllocatorPartitionRootInitialized().
//
// The nested anonymous namespace and `static` previously wrapping this
// definition were redundant — the enclosing anonymous namespace already gives
// it internal linkage — and the flag relied on implicit zero-initialization;
// the brace-init makes the initial `false` explicit.
std::atomic<bool> g_initialization_is_done{false};
335
// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc. Since we'd like to make as much code as possible to use our
// own memory allocator (and reduce bugs caused by mixed use of the system
// allocator and our own allocator), run the following function
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
//
// Note that, despite the highest priority of the initialization order,
// [NSThread init] runs before InitializeDefaultMallocZoneWithPartitionAlloc
// unfortunately and allocates memory with the system allocator. Plus, the
// allocated memory will be deallocated with the default zone's `free` at that
// moment without using a zone dispatcher. Hence, our own `free` function
// receives an address allocated by the system allocator.
__attribute__((constructor(0))) void
InitializeDefaultMallocZoneWithPartitionAlloc() {
  // Bail out if another library (or the executable) already installed a
  // PartitionAlloc zone in this process; see IsAlreadyRegistered().
  if (IsAlreadyRegistered()) {
    return;
  }

  // Instantiate the existing regular and purgeable zones in order to make the
  // existing purgeable zone use the existing regular zone since PartitionAlloc
  // doesn't support a purgeable zone.
  std::ignore = malloc_default_zone();
  std::ignore = malloc_default_purgeable_zone();

  // Initialize the default allocator's PartitionRoot with the existing zone.
  InitializeDefaultAllocatorPartitionRoot();

  // Create our own malloc zone.
  InitializeZone();

  malloc_zone_t* system_default_zone = GetDefaultMallocZone();
  if (strcmp(system_default_zone->zone_name, kDelegatingZoneName) == 0) {
    // The first zone is our zone, we can unregister it, replacing it with the
    // new one. This relies on a precise zone setup, done in
    // |EarlyMallocZoneRegistration()|.
    malloc_zone_register(&g_mac_malloc_zone);
    malloc_zone_unregister(system_default_zone);
    g_initialization_is_done.store(true, std::memory_order_release);
    return;
  }

  // Not in the path where the zone was registered early. This is either racy,
  // or fine if the current process is not hosting multiple threads.
  //
  // This path is fine for e.g. most unit tests.
  //
  // Make our own zone the default zone.
  //
  // Put our own zone at the last position, so that it promotes to the default
  // zone. The implementation logic of malloc_zone_unregister is:
  //   zone_table.swap(unregistered_zone, last_zone);
  //   zone_table.shrink_size_by_1();
  malloc_zone_register(&g_mac_malloc_zone);
  malloc_zone_unregister(system_default_zone);
  // Between malloc_zone_unregister(system_default_zone) (above) and
  // malloc_zone_register(system_default_zone) (below), i.e. while
  // system_default_zone is temporarily absent, it's possible that another
  // thread calls free(ptr), hits a "no zone found" error, and crashes the
  // process.
  malloc_zone_register(system_default_zone);

  // Confirm that our own zone is now the default zone.
  PA_CHECK(GetDefaultMallocZone() == &g_mac_malloc_zone);
  g_initialization_is_done.store(true, std::memory_order_release);
}
400
401 } // namespace
402
IsDefaultAllocatorPartitionRootInitialized()403 bool IsDefaultAllocatorPartitionRootInitialized() {
404 // Even though zone registration is not thread-safe, let's not make it worse,
405 // and use acquire/release ordering.
406 return g_initialization_is_done.load(std::memory_order_acquire);
407 }
408
409 } // namespace allocator_shim
410
411 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
412