// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <errno.h>

#include <new>

#include "base/atomicops.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/process/process_metrics.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

#if !defined(OS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif

#if defined(OS_MACOSX)
#include <malloc/malloc.h>

#include "base/allocator/allocator_interception_mac.h"
#endif

// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ objects here, not even //base ones. Even if
// they are safe to use today, in the future they might be refactored.

namespace {

using namespace base;

subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
    &allocator::AllocatorDispatch::default_dispatch);

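// When true, the shim invokes the std::new_handler when an allocation from
// the malloc family (malloc, calloc, memalign, non-zero-size realloc) fails,
// mirroring the retry behavior of operator new. See
// SetCallNewHandlerOnMallocFailure() below.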
bool g_call_new_handler_on_malloc_failure = false;

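// Caches the system page size. The unsynchronized lazy initialization is
// benign: racing callers all compute and store the same value.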
inline size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}

// Calls the std::new_handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if defined(OS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled and
  // we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}

inline const allocator::AllocatorDispatch* GetChainHead() {
  // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
  // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
  // barriered on Linux+Clang, and that causes visible perf regressions.
  return reinterpret_cast<const allocator::AllocatorDispatch*>(
#if defined(OS_LINUX) && defined(__clang__)
      *static_cast<const volatile subtle::AtomicWord*>(&g_chain_head)
#else
      subtle::NoBarrier_Load(&g_chain_head)
#endif
  );
}

}  // namespace

namespace base {
namespace allocator {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
}

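// Unlike malloc(), never retries via the std::new_handler on failure; callers
// must be prepared for a nullptr result.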
void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_function(chain_head, size, nullptr);
}

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  constexpr size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function guarantees to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the MemoryBarrier() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    subtle::MemoryBarrier();
    subtle::AtomicWord old_value =
        reinterpret_cast<subtle::AtomicWord>(chain_head);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // the comparison will fail, and the new head of chain will be returned.
    if (subtle::NoBarrier_CompareAndSwap(
            &g_chain_head, old_value,
            reinterpret_cast<subtle::AtomicWord>(dispatch)) == old_value) {
      // Success.
      return;
    }
  }

  CHECK(false);  // Too many retries, this shouldn't happen.
}

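// Removal is only supported for the current chain head, and only in tests;
// hence the DCHECK below.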
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  subtle::NoBarrier_Store(&g_chain_head,
                          reinterpret_cast<subtle::AtomicWord>(dispatch->next));
}

}  // namespace allocator
}  // namespace base

// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if it succeeds, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true) has been called.
//   - If the std::new_handler is NOT set, just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just terminate the process, printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the
//       allocation.

ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
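    // On macOS the shim dispatches through malloc zones, so C++ allocations
    // are given the default zone as their context.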
#if defined(OS_MACOSX)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if defined(OS_MACOSX)
  context = malloc_default_zone();
#endif
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
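  // |alignment| must be a non-zero power of two and a multiple of
  // sizeof(void*).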
  if (((alignment % sizeof(void*)) != 0) ||
      ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}

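// valloc() is simply malloc() with page alignment.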
ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
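    // Round the size up to the next multiple of the page size.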
    size = (size + GetCachedPageSize() - 1) & ~(GetCachedPageSize() - 1);
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}

ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

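// Returns the allocator's best estimate of the usable size of the allocation
// at |address| (in the spirit of malloc_usable_size() / malloc_size()).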
ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

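// The batch and definite-size variants below back the corresponding malloc
// zone entry points on macOS.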
ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

}  // extern "C"

#if !defined(OS_WIN) && !defined(OS_MACOSX)
// Cpp symbols (new / delete) should always be routed through the shim layer
// except on Windows and macOS where the malloc intercept is deep enough that it
// also catches the cpp calls.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif

#if defined(OS_ANDROID) || defined(ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#elif defined(OS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif defined(OS_MACOSX)
#include "base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h"
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif

// In the case of tcmalloc we also want to plumb into the glibc hooks to
// prevent allocations made by glibc itself (e.g., strdup()) from accidentally
// being served by the glibc heap instead of the tcmalloc one.
#if defined(USE_TCMALLOC)
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if defined(OS_MACOSX)
namespace base {
namespace allocator {
void InitializeAllocatorShim() {
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim, this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
}
}  // namespace allocator
}  // namespace base
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif