// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <errno.h>

#include <new>

#include "base/atomicops.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/process/process_metrics.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

#if !defined(OS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif

#if defined(OS_MACOSX)
#include <malloc/malloc.h>

#include "base/allocator/allocator_interception_mac.h"
#endif

// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in the future they might be refactored.

namespace {

using namespace base;

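// Head of the singly-linked chain of dispatchers. The chain grows at the
// head: InsertAllocatorDispatch() below prepends new entries, and the tail
// always remains AllocatorDispatch::default_dispatch.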
subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
    &allocator::AllocatorDispatch::default_dispatch);

bool g_call_new_handler_on_malloc_failure = false;

#if !defined(OS_WIN)
subtle::Atomic32 g_new_handler_lock = 0;
#endif

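// Note: the lazy caching below is racy: two threads may both observe
// |pagesize| == 0 and both call base::GetPageSize(). Both will store the
// same value, though, so the race is benign in practice.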
inline size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}

// Calls the std::new_handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if defined(OS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  // TODO(primiano): C++11 has introduced std::get_new_handler(), which is
  // supposed to be thread safe and would avoid the spinlock boilerplate here.
  // However, it doesn't seem to be available in the Linux chroot headers yet.
  std::new_handler nh;
  {
    while (subtle::Acquire_CompareAndSwap(&g_new_handler_lock, 0, 1))
      PlatformThread::YieldCurrentThread();
    nh = std::set_new_handler(nullptr);
    ignore_result(std::set_new_handler(nh));
    subtle::Release_Store(&g_new_handler_lock, 0);
  }
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled and
  // we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}
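// Illustrative sketch (not compiled in): once std::get_new_handler() from
// C++11 <new> is usable in all toolchains, the spinlock dance above could be
// reduced to:
//
//   std::new_handler nh = std::get_new_handler();
//   if (!nh)
//     return false;
//   (*nh)();
//   return true;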

inline const allocator::AllocatorDispatch* GetChainHead() {
  // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
  // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
  // barriered on Linux+Clang, and that causes visible perf regressions.
  return reinterpret_cast<const allocator::AllocatorDispatch*>(
#if defined(OS_LINUX) && defined(__clang__)
      *static_cast<const volatile subtle::AtomicWord*>(&g_chain_head)
#else
      subtle::NoBarrier_Load(&g_chain_head)
#endif
  );
}

}  // namespace

namespace base {
namespace allocator {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
}

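// Note: unlike the Shim* entry points below, UncheckedAlloc() never invokes
// the new_handler on failure; a nullptr is returned directly to the caller.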
void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_function(chain_head, size, nullptr);
}

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  const size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function guarantees to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the MemoryBarrier() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    subtle::MemoryBarrier();
    subtle::AtomicWord old_value =
        reinterpret_cast<subtle::AtomicWord>(chain_head);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // the comparison will fail and we will retry in the next iteration.
    if (subtle::NoBarrier_CompareAndSwap(
            &g_chain_head, old_value,
            reinterpret_cast<subtle::AtomicWord>(dispatch)) == old_value) {
      // Success.
      return;
    }
  }

  CHECK(false);  // Too many retries, this shouldn't happen.
}
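// Illustrative usage of InsertAllocatorDispatch() (hypothetical
// instrumentation code, not part of this file): a tool that wants to observe
// allocations defines a dispatch whose functions forward to |next| and
// prepends it to the chain:
//
//   void* HookedAlloc(const AllocatorDispatch* self, size_t size,
//                     void* context) {
//     // ... record |size| somewhere ...
//     return self->next->alloc_function(self->next, size, context);
//   }
//   AllocatorDispatch g_hooked_dispatch = {&HookedAlloc, /* ... */};
//   base::allocator::InsertAllocatorDispatch(&g_hooked_dispatch);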

void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  subtle::NoBarrier_Store(&g_chain_head,
                          reinterpret_cast<subtle::AtomicWord>(dispatch->next));
}

}  // namespace allocator
}  // namespace base

// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if it succeeds, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true) has been called.
//   - If the std::new_handler is NOT set, just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just terminate the process, printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the
//       allocation.

ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if defined(OS_MACOSX)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if defined(OS_MACOSX)
  context = malloc_default_zone();
#endif
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}
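// Note: the |size| term in the loop condition of ShimRealloc() above is what
// implements the comment at the top of that function: a nullptr returned by
// realloc(ptr, 0) is a successful free, not an allocation failure, so the
// new_handler must not be invoked.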

ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) ||
      ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}
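// For example, assuming 8-byte pointers: alignment 7 fails with EINVAL (not a
// multiple of sizeof(void*)), alignment 24 fails too (a multiple of 8 but not
// a power of two), while alignment 16 is a valid request.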

ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
    size = (size + GetCachedPageSize() - 1) & ~(GetCachedPageSize() - 1);
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}
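// For example, with 4096-byte pages: pvalloc(0) and pvalloc(1) both request a
// single 4096-byte page, while pvalloc(4097) rounds up to two pages (8192
// bytes).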

ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

}  // extern "C"

#if !defined(OS_WIN) && !defined(OS_MACOSX)
// Cpp symbols (new / delete) should always be routed through the shim layer
// except on Windows and macOS where the malloc intercept is deep enough that it
// also catches the cpp calls.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif

#if defined(OS_ANDROID) || defined(ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#elif defined(OS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif defined(OS_MACOSX)
#include "base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h"
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif

// In the case of tcmalloc we also want to plumb into the glibc hooks, to
// prevent allocations made inside glibc itself (e.g., by strdup()) from
// accidentally being performed on the glibc heap instead of the tcmalloc one.
#if defined(USE_TCMALLOC)
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if defined(OS_MACOSX)
namespace base {
namespace allocator {
void InitializeAllocatorShim() {
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim, this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
}
}  // namespace allocator
}  // namespace base
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS)
#error This code cannot be used when exceptions are turned on.
#endif