// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/shim/allocator_shim.h"

#include <errno.h>

#include <atomic>
#include <bit>
#include <new>

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/memory/page_size.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"

#if !BUILDFLAG(IS_WIN)
#include <unistd.h>
#else
#include "partition_alloc/shim/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>

#include "partition_alloc/partition_alloc_base/apple/mach_logging.h"
#include "partition_alloc/shim/allocator_interception_apple.h"
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif
// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in the future they might be refactored.

namespace {

std::atomic<const allocator_shim::AllocatorDispatch*> g_chain_head{
    &allocator_shim::AllocatorDispatch::default_dispatch};
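
// The dispatch chain is a singly linked list of AllocatorDispatch nodes,
// headed by g_chain_head and terminated by the default dispatch, e.g. after
// one insertion:
//
//   g_chain_head -> [inserted dispatch] -> [default_dispatch]
//
// Reads use relaxed loads; see InsertAllocatorDispatch() below for the
// insertion protocol.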

bool g_call_new_handler_on_malloc_failure = false;

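// Returns the system page size, computed once and cached. The lazy
// initialization below is not synchronized; concurrent first callers may each
// compute the page size, but they all store the same value.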
PA_ALWAYS_INLINE size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize) {
    pagesize = partition_alloc::internal::base::GetPageSize();
  }
  return pagesize;
}

// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if BUILDFLAG(IS_WIN)
  return allocator_shim::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh) {
    return false;
  }
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled and
  // we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}
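
// For reference, a minimal sketch of how an embedder can hook into this
// machinery (OnOutOfMemory is hypothetical, not part of this file):
//
//   void OnOutOfMemory() {
//     // Release some cached memory, or terminate. Returning means "retry
//     // the allocation".
//   }
//   std::set_new_handler(&OnOutOfMemory);
//   allocator_shim::SetCallNewHandlerOnMallocFailure(true);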

PA_ALWAYS_INLINE const allocator_shim::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}

}  // namespace

namespace allocator_shim {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
}

void* UncheckedAlloc(size_t size) {
  const AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}

void UncheckedFree(void* ptr) {
  const AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, ptr, nullptr);
}
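
// Unlike allocations routed through malloc() or operator new, UncheckedAlloc()
// reports failure by returning nullptr rather than invoking the new_handler,
// so callers must check the result. A sketch of the intended use:
//
//   void* buf = allocator_shim::UncheckedAlloc(size);
//   if (!buf) {
//     return false;  // Degrade gracefully instead of crashing on OOM.
//   }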

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  const size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function is guaranteed to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, unlike malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }

  PA_CHECK(false);  // Too many retries, this shouldn't happen.
}
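
// A minimal sketch of inserting a custom dispatch (SamplingAlloc and
// g_sampling_dispatch are hypothetical, not part of this file). Each hook
// receives the dispatch it was installed on and forwards to the next node:
//
//   void* SamplingAlloc(const AllocatorDispatch* self, size_t size,
//                       void* context) {
//     RecordSample(size);  // Hypothetical bookkeeping.
//     return self->next->alloc_function(self->next, size, context);
//   }
//
//   AllocatorDispatch g_sampling_dispatch = {
//       .alloc_function = &SamplingAlloc,
//       // ... the remaining members must forward to |next| too ...
//   };
//   allocator_shim::InsertAllocatorDispatch(&g_sampling_dispatch);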

void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  PA_DCHECK(GetChainHead() == dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}

#if BUILDFLAG(IS_APPLE)
void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr) {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";

  // "find_zone_and_free" expected by try_free_default.
  //
  // libmalloc's zones call find_registered_zone() in case the default one
  // doesn't handle the allocation. We can't, so we try to emulate it. See the
  // implementation in libmalloc/src/malloc.c for details.
  // https://github.com/apple-oss-distributions/libmalloc/blob/main/src/malloc.c
  for (unsigned int i = 0; i < zone_count; ++i) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    if (size_t size = zone->size(zone, ptr)) {
      if (zone->version >= 6 && zone->free_definite_size) {
        zone->free_definite_size(zone, ptr, size);
      } else {
        zone->free(zone, ptr);
      }
      return;
    }
  }

  // There must be an owner zone.
  PA_CHECK(false);
}
#endif  // BUILDFLAG(IS_APPLE)

}  // namespace allocator_shim

// The Shim* functions below are the entry points into the shim layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if it succeeds, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true) has been called.
//   - If the std::new_handler is NOT set just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just terminate the process after printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the
//       alloc.
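
// On macOS (when PartitionAlloc-Everywhere is disabled), |context| carries the
// default malloc zone (a malloc_zone_t*) so the dispatch can attribute the
// call to the zone that owns the allocation; in other configurations it stays
// nullptr.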

PA_ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, context);
}

PA_ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

PA_ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE void* ShimMemalign(size_t alignment,
                                    size_t size,
                                    void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE int ShimPosixMemalign(void** res,
                                       size_t alignment,
                                       size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) || !std::has_single_bit(alignment)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}
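
// E.g. posix_memalign(&p, 24, 100) fails with EINVAL: 24 is a multiple of
// sizeof(void*) on 64-bit platforms but not a power of two. An alignment of
// 32 passes both checks.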

PA_ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

PA_ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
    size = partition_alloc::internal::base::bits::AlignUp(size,
                                                          GetCachedPageSize());
  }
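  // E.g. with a 4096-byte page, pvalloc(0) and pvalloc(1) both request 4096
  // bytes, and pvalloc(4097) requests 8192.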
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}

PA_ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

PA_ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address,
                                            void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

PA_ALWAYS_INLINE size_t ShimGoodSize(size_t size, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->good_size_function(chain_head, size, context);
}

PA_ALWAYS_INLINE bool ShimClaimedAddress(void* address, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->claimed_address_function(chain_head, address, context);
}

PA_ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                          void** results,
                                          unsigned num_requested,
                                          void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

PA_ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                    unsigned num_to_be_freed,
                                    void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

PA_ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr,
                                           size_t size,
                                           void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

PA_ALWAYS_INLINE void ShimTryFreeDefault(void* ptr, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->try_free_default_function(chain_head, ptr, context);
}

PA_ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                         size_t alignment,
                                         void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
                                              context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                          size_t size,
                                          size_t alignment,
                                          void* context) {
  // _aligned_realloc(size == 0) means _aligned_free() and might return a
  // nullptr. We should not call the std::new_handler in that case, though.
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
                                               alignment, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

PA_ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->aligned_free_function(chain_head, address, context);
}

}  // extern "C"

#if !BUILDFLAG(IS_WIN) && \
    !(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
// C++ symbols (new / delete) should always be routed through the shim layer,
// except on Windows and on macOS without PartitionAlloc-Everywhere, where the
// malloc interception is deep enough that it also catches the C++ calls.
//
// In the case of PartitionAlloc-Everywhere on macOS, malloc backed by
// allocator_shim::internal::PartitionMalloc crashes on OOM, and we need to
// avoid crashes in case of operator new() noexcept. Thus, operator new()
// noexcept needs to be routed to
// allocator_shim::internal::PartitionMallocUnchecked through the shim layer.
#include "partition_alloc/shim/allocator_shim_override_cpp_symbols.h"
#endif

#if BUILDFLAG(IS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "partition_alloc/shim/allocator_shim_override_linker_wrapped_symbols.h"
#elif BUILDFLAG(IS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "partition_alloc/shim/allocator_shim_override_ucrt_symbols_win.h"
#elif BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "partition_alloc/shim/allocator_shim_override_apple_default_zone.h"
#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "partition_alloc/shim/allocator_shim_override_apple_symbols.h"
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#else
#include "partition_alloc/shim/allocator_shim_override_libc_symbols.h"
#endif

// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that
// both the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, LLVM for LSAN uses
// this mechanism.

#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "partition_alloc/shim/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if BUILDFLAG(IS_APPLE)
namespace allocator_shim {

void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  ReplaceFunctionsForStoredZones(&functions);
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

}  // namespace allocator_shim
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif