// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/shim/allocator_shim.h"

#include <errno.h>

#include <atomic>
#include <new>

#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/memory/page_size.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

#if !BUILDFLAG(IS_WIN)
#include <unistd.h>
#else
#include "base/allocator/partition_allocator/shim/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>

#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
#include "base/mac/mach_logging.h"
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif

// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ objects here, not even //base ones. Even if
// they are safe to use today, they might be refactored in the future.

namespace {

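// The shim's dispatch chain, conceptually (an illustrative sketch; the
// inserted entries named here are hypothetical):
//
//   g_chain_head -> [inserted dispatch] -> ... -> default_dispatch
//
// Every Shim* entry point below loads the head and calls through it; each
// inserted AllocatorDispatch is expected to forward, via its `next` member,
// whatever it does not intercept itself.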
std::atomic<const allocator_shim::AllocatorDispatch*> g_chain_head{
    &allocator_shim::AllocatorDispatch::default_dispatch};

bool g_call_new_handler_on_malloc_failure = false;

ALWAYS_INLINE size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}

// Calls the std::new_handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if BUILDFLAG(IS_WIN)
  return allocator_shim::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled
  // and we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}

ALWAYS_INLINE const allocator_shim::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}

}  // namespace

namespace allocator_shim {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  allocator_shim::internal::PartitionAllocSetCallNewHandlerOnMallocFailure(
      value);
#endif
}

void* UncheckedAlloc(size_t size) {
  const AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}

void UncheckedFree(void* ptr) {
  const AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, ptr, nullptr);
}
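
// Example use of the pair above (illustrative only, not part of this file):
// callers that want to handle allocation failure themselves rather than
// crash on OOM can write:
//
//   void* buf = allocator_shim::UncheckedAlloc(4096);
//   if (buf) {
//     // ... use buf ...
//     allocator_shim::UncheckedFree(buf);
//   }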

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  const size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function is guaranteed to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all threads always see a
    // consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, unlike malloc(), so we
    // don't want to pay for a release-store here with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }

  PA_CHECK(false);  // Too many retries, this shouldn't happen.
}
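
// Example (an illustrative sketch, not part of this file): a hypothetical
// dispatch that counts allocations and forwards to the next entry in the
// chain. All names other than the AllocatorDispatch members are made up.
//
//   std::atomic<size_t> g_alloc_count{0};
//
//   void* CountingAlloc(const AllocatorDispatch* self, size_t size,
//                       void* context) {
//     g_alloc_count.fetch_add(1, std::memory_order_relaxed);
//     return self->next->alloc_function(self->next, size, context);
//   }
//
//   AllocatorDispatch g_counting_dispatch = {
//       .alloc_function = &CountingAlloc,
//       // ... remaining members forward to `next` ...
//   };
//
//   allocator_shim::InsertAllocatorDispatch(&g_counting_dispatch);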

void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  PA_DCHECK(GetChainHead() == dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}

#if BUILDFLAG(IS_APPLE)
void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr) {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";

  // This emulates the "find_zone_and_free" logic that try_free_default
  // expects.
  //
  // libmalloc's zones call find_registered_zone() in case the default one
  // doesn't handle the allocation. We can't, so we try to emulate it. See the
  // implementation in libmalloc/src/malloc.c for details.
  // https://github.com/apple-oss-distributions/libmalloc/blob/main/src/malloc.c
  for (unsigned int i = 0; i < zone_count; ++i) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    if (size_t size = zone->size(zone, ptr)) {
      if (zone->version >= 6 && zone->free_definite_size) {
        zone->free_definite_size(zone, ptr, size);
      } else {
        zone->free(zone, ptr);
      }
      return;
    }
  }

  // There must be an owner zone.
  PA_CHECK(false);
}
#endif  // BUILDFLAG(IS_APPLE)

}  // namespace allocator_shim

// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if it succeeds, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true) has been called.
//   - If the std::new_handler is NOT set, just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just terminate the process after printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the
//       allocation.

ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, context);
}

ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) ||
      !partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}
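
// Expected behaviour of the checks above, per the POSIX contract
// (illustrative values):
//
//   void* p;
//   ShimPosixMemalign(&p, 3, 128);   // EINVAL: 3 is neither a multiple of
//                                    // sizeof(void*) nor a power of two.
//   ShimPosixMemalign(&p, 64, 128);  // 0 on success; p is 64-byte aligned.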

ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
    size = partition_alloc::internal::base::bits::AlignUp(size,
                                                          GetCachedPageSize());
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}
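
// For instance, assuming a 4096-byte page size (illustrative):
//
//   ShimPvalloc(0)     -> one page, 4096 bytes.
//   ShimPvalloc(1)     -> rounded up to 4096 bytes.
//   ShimPvalloc(4097)  -> rounded up to 8192 bytes.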

ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

ALWAYS_INLINE bool ShimClaimedAddress(void* address, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->claimed_address_function(chain_head, address, context);
}

ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

ALWAYS_INLINE void ShimTryFreeDefault(void* ptr, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->try_free_default_function(chain_head, ptr, context);
}

ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
                                              context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  // _aligned_realloc(size == 0) means _aligned_free() and might return a
  // nullptr. We should not call the std::new_handler in that case, though.
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
                                               alignment, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->aligned_free_function(chain_head, address, context);
}

}  // extern "C"

#if !BUILDFLAG(IS_WIN) && \
    !(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
// Cpp symbols (new / delete) should always be routed through the shim layer,
// except on Windows and on macOS without PartitionAlloc-Everywhere, where the
// malloc intercept is deep enough that it also catches the cpp calls.
//
// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
// allocator_shim::internal::PartitionMalloc crashes on OOM, and we need to
// avoid crashes in case of operator new() noexcept. Thus, operator new()
// noexcept needs to be routed to
// allocator_shim::internal::PartitionMallocUnchecked through the shim layer.
#include "base/allocator/partition_allocator/shim/allocator_shim_override_cpp_symbols.h"
#endif

#if BUILDFLAG(IS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h"
#elif BUILDFLAG(IS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/partition_allocator/shim/allocator_shim_override_ucrt_symbols_win.h"
#elif BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/partition_allocator/shim/allocator_shim_override_mac_default_zone.h"
#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/partition_allocator/shim/allocator_shim_override_mac_symbols.h"
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#else
#include "base/allocator/partition_allocator/shim/allocator_shim_override_libc_symbols.h"
#endif

// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that
// both the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, LLVM for LSAN uses
// this mechanism.
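
// The interception pattern is, in essence (an illustrative sketch, not the
// contents of the actual override header):
//
//   extern "C" void* __libc_memalign(size_t alignment, size_t size) {
//     return ShimMemalign(alignment, size, nullptr);
//   }
//
// Defining a symbol with this name in the executable makes glibc's internal
// callers bind to the shim's version, so the matching free() is also caught.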

#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/partition_allocator/shim/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if BUILDFLAG(IS_APPLE)
namespace allocator_shim {

void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim, this routes them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  ReplaceFunctionsForStoredZones(&functions);
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

}  // namespace allocator_shim
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif