// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <errno.h>
#include <unistd.h>

#include <new>

#include "base/atomicops.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

// No calls to malloc / new in this file. They would cause re-entrancy of the
// shim, which is hard to deal with. Keep this code as simple as possible and
// don't use any external C++ objects here, not even //base ones. Even if they
// are safe to use today, in the future they might be refactored.

namespace {

using namespace base;

subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
    &allocator::AllocatorDispatch::default_dispatch);

bool g_call_new_handler_on_malloc_failure = false;
subtle::Atomic32 g_new_handler_lock = 0;

// In theory this should just be a base::ThreadChecker. But we can't afford
// the luxury of a LazyInstance<ThreadChecker> here, as that would cause a
// call to new().
bool CalledOnValidThread() {
  using subtle::Atomic32;
  const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
  static Atomic32 g_tid = kInvalidTID;
  Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
  Atomic32 prev_tid =
      subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
  return prev_tid == kInvalidTID || prev_tid == cur_tid;
}
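
// To illustrate the latching: the first thread to call this function wins and
// becomes the "valid" thread (a minimal sketch of the semantics):
//   CalledOnValidThread();  // Thread A, first call: latches A's TID -> true.
//   CalledOnValidThread();  // Thread A again: TIDs match -> true.
//   CalledOnValidThread();  // Thread B: TID mismatch -> false (DCHECK fires).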

inline size_t GetPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = sysconf(_SC_PAGESIZE);
  return pagesize;
}

// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler() {
  // TODO(primiano): C++11 introduced std::get_new_handler(), which is supposed
  // to be thread-safe and would avoid the spinlock boilerplate here. However,
  // it doesn't seem to be available in the Linux chroot headers yet.
  std::new_handler nh;
  {
    while (subtle::Acquire_CompareAndSwap(&g_new_handler_lock, 0, 1))
      PlatformThread::YieldCurrentThread();
    nh = std::set_new_handler(0);
    ignore_result(std::set_new_handler(nh));
    subtle::Release_Store(&g_new_handler_lock, 0);
  }
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled,
  // and we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
}
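
// As a sketch of how the retry loops below interact with a client-installed
// handler (FreeEmergencyPool() is a hypothetical helper, not part of this
// file):
//   void OnNoMemory() {
//     if (!FreeEmergencyPool())  // Nothing left to release: give up.
//       abort();                 // Matches the no-exceptions assumption.
//   }
//   std::set_new_handler(&OnNoMemory);
// If OnNoMemory() returns, CallNewHandler() returns true and the failed
// allocation is attempted again.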

inline const allocator::AllocatorDispatch* GetChainHead() {
  // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
  // Unfortunately, due to that bug, NoBarrier_Load() is mistakenly fully
  // barriered on Linux+Clang, and that causes visible perf regressions.
  return reinterpret_cast<const allocator::AllocatorDispatch*>(
#if defined(OS_LINUX) && defined(__clang__)
      *static_cast<const volatile subtle::AtomicWord*>(&g_chain_head)
#else
      subtle::NoBarrier_Load(&g_chain_head)
#endif
  );
}

}  // namespace

namespace base {
namespace allocator {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
}

// Unlike the malloc() shim below, UncheckedAlloc() never invokes the
// new_handler on failure: it just returns nullptr and lets the caller cope.
void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_function(chain_head, size);
}
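
// A sketch of the intended caller-side pattern (names are illustrative):
//   void* buf = base::allocator::UncheckedAlloc(huge_size);
//   if (!buf)
//     return false;  // Degrade gracefully instead of crashing on OOM.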

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Ensure this is always called on the same thread.
  DCHECK(CalledOnValidThread());

  dispatch->next = GetChainHead();

  // This function is not guaranteed to be thread-safe w.r.t. concurrent
  // insertions, but it still has to guarantee that all threads always see a
  // consistent chain, hence the MemoryBarrier() below.
  // InsertAllocatorDispatch() is NOT a fastpath, unlike malloc(), so we don't
  // really want this to be a release-store with a corresponding acquire-load
  // during malloc().
  subtle::MemoryBarrier();

  subtle::NoBarrier_Store(&g_chain_head,
                          reinterpret_cast<subtle::AtomicWord>(dispatch));
}
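
// A sketch of a pass-through dispatch that counts allocations. The
// CountingAlloc() hook and g_alloc_count are hypothetical; see
// allocator_shim.h for the full AllocatorDispatch contract:
//   void* CountingAlloc(const AllocatorDispatch* self, size_t size) {
//     subtle::NoBarrier_AtomicIncrement(&g_alloc_count, 1);
//     return self->next->alloc_function(self->next, size);
//   }
//   AllocatorDispatch g_counting_dispatch = {&CountingAlloc,
//                                            /* ...other hooks... */};
//   InsertAllocatorDispatch(&g_counting_dispatch);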

void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(GetChainHead(), dispatch);
  subtle::NoBarrier_Store(&g_chain_head,
                          reinterpret_cast<subtle::AtomicWord>(dispatch->next));
}

}  // namespace allocator
}  // namespace base

// The Shim* functions below are the entry points into the shim layer and are
// supposed to be invoked / aliased by the allocator_shim_override_* headers
// to route the malloc / new symbols through the shim layer.
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if it succeeded, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true) has been called.
//   - If the std::new_handler is NOT set, just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just print a message and abort()).
//     - Assume it succeeded if it returns, in which case reattempt the
//       allocation.

void* ShimCppNew(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size);
  } while (!ptr && CallNewHandler());
  return ptr;
}

void ShimCppDelete(void* address) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address);
}

void* ShimMalloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size);
  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
  return ptr;
}

void* ShimCalloc(size_t n, size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size);
  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
  return ptr;
}

void* ShimRealloc(void* address, size_t size) {
  // realloc(size == 0) means free() and may return nullptr. We should not
  // call the std::new_handler in that case, though.
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler());
  return ptr;
}

void* ShimMemalign(size_t alignment, size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size);
  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
  return ptr;
}

int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) ||
      ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}
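
// Per POSIX, the alignment must be a power of two and a multiple of
// sizeof(void*). On an LP64 system, for example:
//   ShimPosixMemalign(&p, 8, 100);   // 0 (success): 8 == sizeof(void*).
//   ShimPosixMemalign(&p, 24, 100);  // EINVAL: 24 is not a power of two.
//   ShimPosixMemalign(&p, 4, 100);   // EINVAL: 4 < sizeof(void*).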

void* ShimValloc(size_t size) {
  return ShimMemalign(GetPageSize(), size);
}

void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetPageSize();
  } else {
    size = (size + GetPageSize() - 1) & ~(GetPageSize() - 1);
  }
  return ShimMemalign(GetPageSize(), size);
}
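
// A worked example of the rounding above, assuming a 4096-byte page:
//   pvalloc(0)    -> requests 4096 bytes (one page).
//   pvalloc(1)    -> requests 4096 bytes.
//   pvalloc(4097) -> requests 8192 bytes (two pages).
// The mask arithmetic relies on the page size being a power of two.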

void ShimFree(void* address) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address);
}

}  // extern "C"

// C++ symbols (new / delete) should always be routed through the shim layer.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"

// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#if !defined(OS_ANDROID) && !defined(ANDROID)
// Ditto for plain malloc() / calloc() / free() etc. symbols.
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#else
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#endif

// In the case of tcmalloc we also want to plumb into the glibc hooks, to
// prevent allocations made by glibc itself (e.g., strdup()) from accidentally
// being performed on the glibc heap instead of the tcmalloc one.
#if defined(USE_TCMALLOC)
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS)
#error This code cannot be used when exceptions are turned on.
#endif