• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <inttypes.h>
30 #include <pthread.h>
31 #include <stdatomic.h>
32 #include <stdint.h>
33 #include <stdio.h>
34 #include <unistd.h>
35 
36 #include <private/bionic_malloc_dispatch.h>
37 
38 #if __has_feature(hwaddress_sanitizer)
39 #include <sanitizer/allocator_interface.h>
40 #endif
41 
42 #include "malloc_common.h"
43 #include "malloc_common_dynamic.h"
44 #include "malloc_heapprofd.h"
45 #include "malloc_limit.h"
46 
__BEGIN_DECLS
// Limit-enforcing wrappers: each checks/updates the running allocation
// total before forwarding to the real allocator.
static void* LimitCalloc(size_t n_elements, size_t elem_size);
static void LimitFree(void* mem);
static void* LimitMalloc(size_t bytes);
static void* LimitMemalign(size_t alignment, size_t bytes);
static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size);
static void* LimitRealloc(void* old_mem, size_t bytes);
static void* LimitAlignedAlloc(size_t alignment, size_t size);
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes);
static void* LimitValloc(size_t bytes);
#endif

// Pass through functions.
static size_t LimitUsableSize(const void* mem);
static struct mallinfo LimitMallinfo();
static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg);
static void LimitMallocDisable();
static void LimitMallocEnable();
static int LimitMallocInfo(int options, FILE* fp);
static int LimitMallopt(int param, int value);
__END_DECLS
69 
// Dispatch table installed while an allocation limit is active.
// NOTE: the initializer is positional — the entry order must match the
// field order of MallocDispatch exactly; do not reorder.
static constexpr MallocDispatch __limit_dispatch
  __attribute__((unused)) = {
    LimitCalloc,
    LimitFree,
    LimitMallinfo,
    LimitMalloc,
    LimitUsableSize,
    LimitMemalign,
    LimitPosixMemalign,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitPvalloc,
#endif
    LimitRealloc,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitValloc,
#endif
    LimitIterate,
    LimitMallocDisable,
    LimitMallocEnable,
    LimitMallopt,
    LimitAlignedAlloc,
    LimitMallocInfo,
  };
93 
// Running total of allocated bytes (usable sizes), updated on every
// allocation and free while the limit dispatch table is installed.
static _Atomic uint64_t gAllocated;
// Maximum allowed value of gAllocated; written once by LimitEnable()
// before the dispatch table is swapped in, read-only afterwards.
static uint64_t gAllocLimit;
96 
CheckLimit(size_t bytes)97 static inline bool CheckLimit(size_t bytes) {
98   uint64_t total;
99   if (__predict_false(__builtin_add_overflow(
100                           atomic_load_explicit(&gAllocated, memory_order_relaxed), bytes, &total) ||
101                       total > gAllocLimit)) {
102     return false;
103   }
104   return true;
105 }
106 
IncrementLimit(void * mem)107 static inline void* IncrementLimit(void* mem) {
108   if (__predict_false(mem == nullptr)) {
109     return nullptr;
110   }
111   atomic_fetch_add(&gAllocated, LimitUsableSize(mem));
112   return mem;
113 }
114 
LimitCalloc(size_t n_elements,size_t elem_size)115 void* LimitCalloc(size_t n_elements, size_t elem_size) {
116   size_t total;
117   if (__builtin_mul_overflow(n_elements, elem_size, &total) || !CheckLimit(total)) {
118     warning_log("malloc_limit: calloc(%zu, %zu) exceeds limit %" PRId64, n_elements, elem_size,
119                 gAllocLimit);
120     return nullptr;
121   }
122   auto dispatch_table = GetDefaultDispatchTable();
123   if (__predict_false(dispatch_table != nullptr)) {
124     return IncrementLimit(dispatch_table->calloc(n_elements, elem_size));
125   }
126   return IncrementLimit(Malloc(calloc)(n_elements, elem_size));
127 }
128 
LimitFree(void * mem)129 void LimitFree(void* mem) {
130   atomic_fetch_sub(&gAllocated, LimitUsableSize(mem));
131   auto dispatch_table = GetDefaultDispatchTable();
132   if (__predict_false(dispatch_table != nullptr)) {
133     return dispatch_table->free(mem);
134   }
135   return Malloc(free)(mem);
136 }
137 
LimitMalloc(size_t bytes)138 void* LimitMalloc(size_t bytes) {
139   if (!CheckLimit(bytes)) {
140     warning_log("malloc_limit: malloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
141     return nullptr;
142   }
143   auto dispatch_table = GetDefaultDispatchTable();
144   if (__predict_false(dispatch_table != nullptr)) {
145     return IncrementLimit(dispatch_table->malloc(bytes));
146   }
147   return IncrementLimit(Malloc(malloc)(bytes));
148 }
149 
LimitMemalign(size_t alignment,size_t bytes)150 static void* LimitMemalign(size_t alignment, size_t bytes) {
151   if (!CheckLimit(bytes)) {
152     warning_log("malloc_limit: memalign(%zu, %zu) exceeds limit %" PRId64, alignment, bytes,
153                 gAllocLimit);
154     return nullptr;
155   }
156   auto dispatch_table = GetDefaultDispatchTable();
157   if (__predict_false(dispatch_table != nullptr)) {
158     return IncrementLimit(dispatch_table->memalign(alignment, bytes));
159   }
160   return IncrementLimit(Malloc(memalign)(alignment, bytes));
161 }
162 
LimitPosixMemalign(void ** memptr,size_t alignment,size_t size)163 static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size) {
164   if (!CheckLimit(size)) {
165     warning_log("malloc_limit: posix_memalign(%zu, %zu) exceeds limit %" PRId64, alignment, size,
166                 gAllocLimit);
167     return ENOMEM;
168   }
169   int retval;
170   auto dispatch_table = GetDefaultDispatchTable();
171   if (__predict_false(dispatch_table != nullptr)) {
172     retval = dispatch_table->posix_memalign(memptr, alignment, size);
173   } else {
174     retval = Malloc(posix_memalign)(memptr, alignment, size);
175   }
176   if (__predict_false(retval != 0)) {
177     return retval;
178   }
179   IncrementLimit(*memptr);
180   return 0;
181 }
182 
LimitAlignedAlloc(size_t alignment,size_t size)183 static void* LimitAlignedAlloc(size_t alignment, size_t size) {
184   if (!CheckLimit(size)) {
185     warning_log("malloc_limit: aligned_alloc(%zu, %zu) exceeds limit %" PRId64, alignment, size,
186                 gAllocLimit);
187     return nullptr;
188   }
189   auto dispatch_table = GetDefaultDispatchTable();
190   if (__predict_false(dispatch_table != nullptr)) {
191     return IncrementLimit(dispatch_table->aligned_alloc(alignment, size));
192   }
193   return IncrementLimit(Malloc(aligned_alloc)(alignment, size));
194 }
195 
LimitRealloc(void * old_mem,size_t bytes)196 static void* LimitRealloc(void* old_mem, size_t bytes) {
197   size_t old_usable_size = LimitUsableSize(old_mem);
198   void* new_ptr;
199   // Need to check the size only if the allocation will increase in size.
200   if (bytes > old_usable_size && !CheckLimit(bytes - old_usable_size)) {
201     warning_log("malloc_limit: realloc(%p, %zu) exceeds limit %" PRId64, old_mem, bytes,
202                 gAllocLimit);
203     // Free the old pointer.
204     LimitFree(old_mem);
205     return nullptr;
206   }
207 
208   auto dispatch_table = GetDefaultDispatchTable();
209   if (__predict_false(dispatch_table != nullptr)) {
210     new_ptr = dispatch_table->realloc(old_mem, bytes);
211   } else {
212     new_ptr = Malloc(realloc)(old_mem, bytes);
213   }
214 
215   if (__predict_false(new_ptr == nullptr)) {
216     // This acts as if the pointer was freed.
217     atomic_fetch_sub(&gAllocated, old_usable_size);
218     return nullptr;
219   }
220 
221   size_t new_usable_size = LimitUsableSize(new_ptr);
222   // Assumes that most allocations increase in size, rather than shrink.
223   if (__predict_false(old_usable_size > new_usable_size)) {
224     atomic_fetch_sub(&gAllocated, old_usable_size - new_usable_size);
225   } else {
226     atomic_fetch_add(&gAllocated, new_usable_size - old_usable_size);
227   }
228   return new_ptr;
229 }
230 
231 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
LimitPvalloc(size_t bytes)232 static void* LimitPvalloc(size_t bytes) {
233   if (!CheckLimit(bytes)) {
234     warning_log("malloc_limit: pvalloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
235     return nullptr;
236   }
237   auto dispatch_table = GetDefaultDispatchTable();
238   if (__predict_false(dispatch_table != nullptr)) {
239     return IncrementLimit(dispatch_table->pvalloc(bytes));
240   }
241   return IncrementLimit(Malloc(pvalloc)(bytes));
242 }
243 
LimitValloc(size_t bytes)244 static void* LimitValloc(size_t bytes) {
245   if (!CheckLimit(bytes)) {
246     warning_log("malloc_limit: valloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
247     return nullptr;
248   }
249   auto dispatch_table = GetDefaultDispatchTable();
250   if (__predict_false(dispatch_table != nullptr)) {
251     return IncrementLimit(dispatch_table->valloc(bytes));
252   }
253   return IncrementLimit(Malloc(valloc)(bytes));
254 }
255 #endif
256 
MallocLimitInstalled()257 bool MallocLimitInstalled() {
258   return GetDispatchTable() == &__limit_dispatch;
259 }
260 
261 #if defined(LIBC_STATIC)
// Installs __limit_dispatch as the current dispatch table (static-libc
// build). Always succeeds.
static bool EnableLimitDispatchTable() {
  // This is the only valid way to modify the dispatch tables for a
  // static executable so no locks are necessary.
  __libc_globals.mutate([](libc_globals* globals) {
    atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
  });
  return true;
}
270 #else
EnableLimitDispatchTable()271 static bool EnableLimitDispatchTable() {
272   pthread_mutex_lock(&gGlobalsMutateLock);
273   // All other code that calls mutate will grab the gGlobalsMutateLock.
274   // However, there is one case where the lock cannot be acquired, in the
275   // signal handler that enables heapprofd. In order to avoid having two
276   // threads calling mutate at the same time, use an atomic variable to
277   // verify that only this function or the signal handler are calling mutate.
278   // If this function is called at the same time as the signal handler is
279   // being called, allow a short period for the signal handler to complete
280   // before failing.
281   bool enabled = false;
282   size_t num_tries = 200;
283   while (true) {
284     if (!atomic_exchange(&gGlobalsMutating, true)) {
285       __libc_globals.mutate([](libc_globals* globals) {
286         atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
287       });
288       atomic_store(&gGlobalsMutating, false);
289       enabled = true;
290       break;
291     }
292     if (--num_tries == 0) {
293       break;
294     }
295     usleep(1000);
296   }
297   pthread_mutex_unlock(&gGlobalsMutateLock);
298   if (enabled) {
299     info_log("malloc_limit: Allocation limit enabled, max size %" PRId64 " bytes\n", gAllocLimit);
300   } else {
301     error_log("malloc_limit: Failed to enable allocation limit.");
302   }
303   return enabled;
304 }
305 #endif
306 
// Enables the allocation limit. |arg| must point to a size_t holding the
// limit in bytes; |arg_size| must be sizeof(size_t). The limit can only be
// enabled once per process. Returns false (setting errno to EINVAL for bad
// arguments) on failure.
bool LimitEnable(void* arg, size_t arg_size) {
  if (arg == nullptr || arg_size != sizeof(size_t)) {
    errno = EINVAL;
    return false;
  }

  static _Atomic bool limit_enabled;
  if (atomic_exchange(&limit_enabled, true)) {
    // The limit can only be enabled once.
    error_log("malloc_limit: The allocation limit has already been set, it can only be set once.");
    return false;
  }

  gAllocLimit = *reinterpret_cast<size_t*>(arg);
  // Seed gAllocated with the bytes already allocated, so the limit applies
  // to total live memory, not just allocations made from this point on.
#if __has_feature(hwaddress_sanitizer)
  size_t current_allocated = __sanitizer_get_current_allocated_bytes();
#else
  size_t current_allocated;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    current_allocated = dispatch_table->mallinfo().uordblks;
  } else {
    current_allocated = Malloc(mallinfo)().uordblks;
  }
#endif
  // This has to be set before the enable occurs since "gAllocated" is used
  // to compute the limit. If the enable fails, "gAllocated" is never used.
  atomic_store(&gAllocated, current_allocated);

  if (!EnableLimitDispatchTable()) {
    // Failed to enable, reset so a future enable will pass.
    atomic_store(&limit_enabled, false);
    return false;
  }
  return true;
}
343 
LimitUsableSize(const void * mem)344 static size_t LimitUsableSize(const void* mem) {
345   auto dispatch_table = GetDefaultDispatchTable();
346   if (__predict_false(dispatch_table != nullptr)) {
347     return dispatch_table->malloc_usable_size(mem);
348   }
349   return Malloc(malloc_usable_size)(mem);
350 }
351 
LimitMallinfo()352 static struct mallinfo LimitMallinfo() {
353   auto dispatch_table = GetDefaultDispatchTable();
354   if (__predict_false(dispatch_table != nullptr)) {
355     return dispatch_table->mallinfo();
356   }
357   return Malloc(mallinfo)();
358 }
359 
LimitIterate(uintptr_t base,size_t size,void (* callback)(uintptr_t,size_t,void *),void * arg)360 static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg) {
361   auto dispatch_table = GetDefaultDispatchTable();
362   if (__predict_false(dispatch_table != nullptr)) {
363     return dispatch_table->malloc_iterate(base, size, callback, arg);
364   }
365   return Malloc(malloc_iterate)(base, size, callback, arg);
366 }
367 
LimitMallocDisable()368 static void LimitMallocDisable() {
369   auto dispatch_table = GetDefaultDispatchTable();
370   if (__predict_false(dispatch_table != nullptr)) {
371     dispatch_table->malloc_disable();
372   } else {
373     Malloc(malloc_disable)();
374   }
375 }
376 
LimitMallocEnable()377 static void LimitMallocEnable() {
378   auto dispatch_table = GetDefaultDispatchTable();
379   if (__predict_false(dispatch_table != nullptr)) {
380     dispatch_table->malloc_enable();
381   } else {
382     Malloc(malloc_enable)();
383   }
384 }
385 
LimitMallocInfo(int options,FILE * fp)386 static int LimitMallocInfo(int options, FILE* fp) {
387   auto dispatch_table = GetDefaultDispatchTable();
388   if (__predict_false(dispatch_table != nullptr)) {
389     return dispatch_table->malloc_info(options, fp);
390   }
391   return Malloc(malloc_info)(options, fp);
392 }
393 
LimitMallopt(int param,int value)394 static int LimitMallopt(int param, int value) {
395   auto dispatch_table = GetDefaultDispatchTable();
396   if (__predict_false(dispatch_table != nullptr)) {
397     return dispatch_table->mallopt(param, value);
398   }
399   return Malloc(mallopt)(param, value);
400 }
401