/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <mutex>
#include <vector>

#include <android-base/file.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <bionic/malloc_tagged_pointers.h>
#include <platform/bionic/reserved_signals.h>
#include <private/MallocXmlElem.h>
#include <private/bionic_malloc_dispatch.h>
#include <unwindstack/Unwinder.h>

#include "Config.h"
#include "DebugData.h"
#include "Unreachable.h"
#include "UnwindBacktrace.h"
#include "backtrace.h"
#include "debug_disable.h"
#include "debug_log.h"
#include "malloc_debug.h"

// ------------------------------------------------------------------------
// Global Data
// ------------------------------------------------------------------------
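// g_debug holds the parsed malloc debug configuration and per-option state,
// g_zygote_child points at libc's flag recording whether this is a zygote
// child process, and g_dispatch is the table of the real allocator's entry
// points that the debug_* wrappers forward to.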
DebugData* g_debug;

bool* g_zygote_child;

const MallocDispatch* g_dispatch;

static __always_inline uint64_t Nanotime() {
  struct timespec t = {};
  clock_gettime(CLOCK_MONOTONIC, &t);
  return static_cast<uint64_t>(t.tv_sec) * 1000000000LL + t.tv_nsec;
}

namespace {
// A TimedResult contains the result from the malloc et al. functions and the
// start/end timestamps of the call.
struct TimedResult {
  uint64_t start_ns = 0;
  uint64_t end_ns = 0;
  union {
    size_t s;
    int i;
    void* p;
  } v;

  uint64_t GetStartTimeNS() const { return start_ns; }
  uint64_t GetEndTimeNS() const { return end_ns; }
  void SetStartTimeNS(uint64_t t) { start_ns = t; }
  void SetEndTimeNS(uint64_t t) { end_ns = t; }

  template <typename T>
  void setValue(T);
  template <>
  void setValue(size_t s) {
    v.s = s;
  }
  template <>
  void setValue(int i) {
    v.i = i;
  }
  template <>
  void setValue(void* p) {
    v.p = p;
  }

  template <typename T>
  T getValue() const;
  template <>
  size_t getValue<size_t>() const {
    return v.s;
  }
  template <>
  int getValue<int>() const {
    return v.i;
  }
  template <>
  void* getValue<void*>() const {
    return v.p;
  }
};

class ScopedTimer {
 public:
  ScopedTimer(TimedResult& res) : res_(res) { res_.start_ns = Nanotime(); }

  ~ScopedTimer() { res_.end_ns = Nanotime(); }

 private:
  TimedResult& res_;
};

}  // namespace

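// Forward a call through the MallocDispatch table and capture start/end
// timestamps in a TimedResult so callers can attach them to record_allocs
// entries. TimerCall skips the timing overhead when allocations are not
// being recorded.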
template <typename MallocFn, typename... Args>
static TimedResult TimerCall(MallocFn fn, Args... args) {
  TimedResult ret;
  decltype((g_dispatch->*fn)(args...)) r;
  if (g_debug->config().options() & RECORD_ALLOCS) {
    ScopedTimer t(ret);
    r = (g_dispatch->*fn)(args...);
  } else {
    r = (g_dispatch->*fn)(args...);
  }
  ret.setValue<decltype(r)>(r);
  return ret;
}

template <typename MallocFn, typename... Args>
static TimedResult TimerCallVoid(MallocFn fn, Args... args) {
  TimedResult ret;
  {
    ScopedTimer t(ret);
    (g_dispatch->*fn)(args...);
  }
  return ret;
}

#define TCALL(FUNC, ...) TimerCall(&MallocDispatch::FUNC, __VA_ARGS__);
#define TCALLVOID(FUNC, ...) TimerCallVoid(&MallocDispatch::FUNC, __VA_ARGS__);

// ------------------------------------------------------------------------

// ------------------------------------------------------------------------
// Use C style prototypes for all exported functions. This makes it easy
// to do dlsym lookups during libc initialization when malloc debug
// is enabled.
// ------------------------------------------------------------------------
__BEGIN_DECLS

bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* malloc_zygote_child,
                      const char* options);
void debug_finalize();
void debug_dump_heap(const char* file_name);
void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
                                size_t* total_memory, size_t* backtrace_size);
bool debug_write_malloc_leak_info(FILE* fp);
ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
void debug_free_malloc_leak_info(uint8_t* info);
size_t debug_malloc_usable_size(void* pointer);
void* debug_malloc(size_t size);
void debug_free(void* pointer);
void* debug_aligned_alloc(size_t alignment, size_t size);
void* debug_memalign(size_t alignment, size_t bytes);
void* debug_realloc(void* pointer, size_t bytes);
void* debug_calloc(size_t nmemb, size_t bytes);
struct mallinfo debug_mallinfo();
int debug_mallopt(int param, int value);
int debug_malloc_info(int options, FILE* fp);
int debug_posix_memalign(void** memptr, size_t alignment, size_t size);
int debug_malloc_iterate(uintptr_t base, size_t size,
                         void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
void debug_malloc_disable();
void debug_malloc_enable();

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* debug_pvalloc(size_t bytes);
void* debug_valloc(size_t size);
#endif

__END_DECLS
// ------------------------------------------------------------------------

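// Every debug entry point holds this lock for reading so that allocation
// calls from different threads can run concurrently. BlockAllOperations()
// takes the write lock (and never releases it) so debug_finalize() can wait
// for in-flight operations to drain before tearing the state down.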
class ScopedConcurrentLock {
 public:
  ScopedConcurrentLock() {
    pthread_rwlock_rdlock(&lock_);
  }
  ~ScopedConcurrentLock() {
    pthread_rwlock_unlock(&lock_);
  }

  static void Init() {
    pthread_rwlockattr_t attr;
    // Set the attribute so that when a write lock is pending, read locks are no
    // longer granted.
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    pthread_rwlock_init(&lock_, &attr);
  }

  static void BlockAllOperations() {
    pthread_rwlock_wrlock(&lock_);
  }

 private:
  static pthread_rwlock_t lock_;
};
pthread_rwlock_t ScopedConcurrentLock::lock_;

// Use this because the sigprocmask* functions filter out the reserved bionic
// signals including the signal this code blocks.
static inline int __rt_sigprocmask(int how, const sigset64_t* new_set, sigset64_t* old_set,
                                   size_t sigset_size) {
  return syscall(SYS_rt_sigprocmask, how, new_set, old_set, sigset_size);
}

// Need to block the backtrace signal while in malloc debug routines
// otherwise there is a chance of a deadlock and timeout when unwinding.
// This can occur if a thread is paused while owning a malloc debug
// internal lock.
class ScopedBacktraceSignalBlocker {
 public:
  ScopedBacktraceSignalBlocker() {
    sigemptyset64(&backtrace_set_);
    sigaddset64(&backtrace_set_, BIONIC_SIGNAL_BACKTRACE);
    sigset64_t old_set;
    __rt_sigprocmask(SIG_BLOCK, &backtrace_set_, &old_set, sizeof(backtrace_set_));
    if (sigismember64(&old_set, BIONIC_SIGNAL_BACKTRACE)) {
      unblock_ = false;
    }
  }

  ~ScopedBacktraceSignalBlocker() {
    if (unblock_) {
      __rt_sigprocmask(SIG_UNBLOCK, &backtrace_set_, nullptr, sizeof(backtrace_set_));
    }
  }

 private:
  bool unblock_ = true;
  sigset64_t backtrace_set_;
};

static void InitAtfork() {
  static pthread_once_t atfork_init = PTHREAD_ONCE_INIT;
  pthread_once(&atfork_init, []() {
    pthread_atfork(
        []() {
          if (g_debug != nullptr) {
            g_debug->PrepareFork();
          }
        },
        []() {
          if (g_debug != nullptr) {
            g_debug->PostForkParent();
          }
        },
        []() {
          if (g_debug != nullptr) {
            g_debug->PostForkChild();
          }
        });
  });
}

void BacktraceAndLog() {
  if (g_debug->config().options() & BACKTRACE_FULL) {
    std::vector<uintptr_t> frames;
    std::vector<unwindstack::FrameData> frames_info;
    if (!Unwind(&frames, &frames_info, 256)) {
      error_log("  Backtrace failed to get any frames.");
    } else {
      UnwindLog(frames_info);
    }
  } else {
    std::vector<uintptr_t> frames(256);
    size_t num_frames = backtrace_get(frames.data(), frames.size());
    if (num_frames == 0) {
      error_log("  Backtrace failed to get any frames.");
    } else {
      backtrace_log(frames.data(), num_frames);
    }
  }
}

static void LogError(const void* pointer, const char* error_str) {
  error_log(LOG_DIVIDER);
  error_log("+++ ALLOCATION %p %s", pointer, error_str);

  // If we are tracking already freed pointers, check to see if this is
  // one so we can print extra information.
  if (g_debug->config().options() & FREE_TRACK) {
    PointerData::LogFreeBacktrace(pointer);
  }

  error_log("Backtrace at time of failure:");
  BacktraceAndLog();
  error_log(LOG_DIVIDER);
  if (g_debug->config().options() & ABORT_ON_ERROR) {
    abort();
  }
}

static bool VerifyPointer(const void* pointer, const char* function_name) {
  if (g_debug->HeaderEnabled()) {
    Header* header = g_debug->GetHeader(pointer);
    if (header->tag != DEBUG_TAG) {
      std::string error_str;
      if (header->tag == DEBUG_FREE_TAG) {
        error_str = std::string("USED AFTER FREE (") + function_name + ")";
      } else {
        error_str = android::base::StringPrintf("HAS INVALID TAG %" PRIx32 " (%s)", header->tag,
                                                function_name);
      }
      LogError(pointer, error_str.c_str());
      return false;
    }
  }

  if (g_debug->TrackPointers()) {
    if (!PointerData::Exists(pointer)) {
      std::string error_str(std::string("UNKNOWN POINTER (") + function_name + ")");
      LogError(pointer, error_str.c_str());
      return false;
    }
  }
  return true;
}

static size_t InternalMallocUsableSize(void* pointer) {
  if (g_debug->HeaderEnabled()) {
    return g_debug->GetHeader(pointer)->usable_size;
  } else {
    return g_dispatch->malloc_usable_size(pointer);
  }
}

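// Initialize the debug Header placed in front of an allocation when a
// header-based option is enabled, and write the front/rear guard patterns.
// Returns the pointer to hand back to the caller, or nullptr if the
// underlying allocation is unusable.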
static void* InitHeader(Header* header, void* orig_pointer, size_t size) {
  header->tag = DEBUG_TAG;
  header->orig_pointer = orig_pointer;
  header->size = size;
  header->usable_size = g_dispatch->malloc_usable_size(orig_pointer);
  if (header->usable_size == 0) {
    g_dispatch->free(orig_pointer);
    return nullptr;
  }
  header->usable_size -= g_debug->pointer_offset() + reinterpret_cast<uintptr_t>(header) -
                         reinterpret_cast<uintptr_t>(orig_pointer);

  if (g_debug->config().options() & FRONT_GUARD) {
    uint8_t* guard = g_debug->GetFrontGuard(header);
    memset(guard, g_debug->config().front_guard_value(), g_debug->config().front_guard_bytes());
  }

  if (g_debug->config().options() & REAR_GUARD) {
    uint8_t* guard = g_debug->GetRearGuard(header);
    memset(guard, g_debug->config().rear_guard_value(), g_debug->config().rear_guard_bytes());
    // If the rear guard is enabled, set the usable size to the exact size
    // of the allocation.
    header->usable_size = header->size;
  }

  return g_debug->GetPointer(header);
}

extern "C" void __asan_init() __attribute__((weak));

bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* zygote_child,
                      const char* options) {
  if (zygote_child == nullptr || options == nullptr) {
    return false;
  }

  if (__asan_init != 0) {
    error_log("malloc debug cannot be enabled alongside ASAN");
    return false;
  }

  InitAtfork();

  g_zygote_child = zygote_child;

  g_dispatch = malloc_dispatch;

  if (!DebugDisableInitialize()) {
    return false;
  }

  DebugData* debug = new DebugData();
  if (!debug->Initialize(options) || !Unreachable::Initialize(debug->config())) {
    delete debug;
    DebugDisableFinalize();
    return false;
  }
  g_debug = debug;

  // Always enable the backtrace code since we will use it in a number
  // of different error cases.
  backtrace_startup();

  if (g_debug->config().options() & VERBOSE) {
    info_log("%s: malloc debug enabled", getprogname());
  }

  ScopedConcurrentLock::Init();

  return true;
}

void debug_finalize() {
  if (g_debug == nullptr) {
    return;
  }

  // Make sure that there are no other threads doing debug allocations
  // before we kill everything.
  ScopedConcurrentLock::BlockAllOperations();

  // Turn off capturing allocation calls.
  DebugDisableSet(true);

  if (g_debug->config().options() & FREE_TRACK) {
    PointerData::VerifyAllFreed();
  }

  if (g_debug->config().options() & LEAK_TRACK) {
    PointerData::LogLeaks();
  }

  if ((g_debug->config().options() & BACKTRACE) && g_debug->config().backtrace_dump_on_exit()) {
    debug_dump_heap(android::base::StringPrintf("%s.%d.exit.txt",
                                                g_debug->config().backtrace_dump_prefix().c_str(),
                                                getpid()).c_str());
  }

  backtrace_shutdown();

  // In order to prevent any issues of threads freeing previous pointers
  // after the main thread calls this code, simply leak the g_debug pointer
  // and do not destroy the debug disable pthread key.
}

void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
                                size_t* total_memory, size_t* backtrace_size) {
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // Verify the arguments.
  if (info == nullptr || overall_size == nullptr || info_size == nullptr || total_memory == nullptr ||
      backtrace_size == nullptr) {
    error_log("get_malloc_leak_info: At least one invalid parameter.");
    return;
  }

  *info = nullptr;
  *overall_size = 0;
  *info_size = 0;
  *total_memory = 0;
  *backtrace_size = 0;

  if (!(g_debug->config().options() & BACKTRACE)) {
    error_log(
        "get_malloc_leak_info: Allocations not being tracked, to enable "
        "set the option 'backtrace'.");
    return;
  }

  PointerData::GetInfo(info, overall_size, info_size, total_memory, backtrace_size);
}

void debug_free_malloc_leak_info(uint8_t* info) {
  g_dispatch->free(info);
  // Purge the memory that was freed since a significant amount of
  // memory could have been allocated and freed.
  g_dispatch->mallopt(M_PURGE_ALL, 0);
}

size_t debug_malloc_usable_size(void* pointer) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled() || pointer == nullptr) {
    return g_dispatch->malloc_usable_size(pointer);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  if (!VerifyPointer(pointer, "malloc_usable_size")) {
    return 0;
  }

  return InternalMallocUsableSize(pointer);
}

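// Common allocation path used by debug_malloc() and debug_realloc(): adds
// the extra bytes needed for the header and guards, optionally fills the new
// allocation, and registers the pointer when tracking is enabled.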
static TimedResult InternalMalloc(size_t size) {
  if ((g_debug->config().options() & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
    debug_dump_heap(android::base::StringPrintf(
                        "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
                        .c_str());
  }

  if (size == 0) {
    size = 1;
  }

  TimedResult result;

  size_t real_size = size + g_debug->extra_bytes();
  if (real_size < size) {
    // Overflow.
    errno = ENOMEM;
    result.setValue<void*>(nullptr);
    return result;
  }

  if (size > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    result.setValue<void*>(nullptr);
    return result;
  }

  if (g_debug->HeaderEnabled()) {
    result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
    Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
    if (header == nullptr) {
      return result;
    }
    result.setValue<void*>(InitHeader(header, header, size));
  } else {
    result = TCALL(malloc, real_size);
  }

  void* pointer = result.getValue<void*>();

  if (pointer != nullptr) {
    if (g_debug->TrackPointers()) {
      PointerData::Add(pointer, size);
    }

    if (g_debug->config().options() & FILL_ON_ALLOC) {
      size_t bytes = InternalMallocUsableSize(pointer);
      size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
      bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
      memset(pointer, g_debug->config().fill_alloc_value(), bytes);
    }
  }

  return result;
}

void* debug_malloc(size_t size) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->malloc(size);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  TimedResult result = InternalMalloc(size);

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(new MallocEntry(result.getValue<void*>(), size,
                                              result.GetStartTimeNS(), result.GetEndTimeNS()));
  }

  return result.getValue<void*>();
}

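// Common free path used by debug_free() and debug_realloc(): checks the
// guards, optionally fills the memory being freed, removes the pointer from
// tracking, and defers the real free via PointerData::AddFreed() when
// free_track is enabled.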
static TimedResult InternalFree(void* pointer) {
  if ((g_debug->config().options() & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
    debug_dump_heap(android::base::StringPrintf(
                        "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
                        .c_str());
  }

  void* free_pointer = pointer;
  size_t bytes;
  Header* header;
  if (g_debug->HeaderEnabled()) {
    header = g_debug->GetHeader(pointer);
    free_pointer = header->orig_pointer;

    if (g_debug->config().options() & FRONT_GUARD) {
      if (!g_debug->front_guard->Valid(header)) {
        g_debug->front_guard->LogFailure(header);
      }
    }
    if (g_debug->config().options() & REAR_GUARD) {
      if (!g_debug->rear_guard->Valid(header)) {
        g_debug->rear_guard->LogFailure(header);
      }
    }

    header->tag = DEBUG_FREE_TAG;

    bytes = header->usable_size;
  } else {
    bytes = g_dispatch->malloc_usable_size(pointer);
  }

  if (g_debug->config().options() & FILL_ON_FREE) {
    size_t fill_bytes = g_debug->config().fill_on_free_bytes();
    fill_bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
    memset(pointer, g_debug->config().fill_free_value(), fill_bytes);
  }

  if (g_debug->TrackPointers()) {
    PointerData::Remove(pointer);
  }

  TimedResult result;
  if (g_debug->config().options() & FREE_TRACK) {
    // Do not add the allocation until we are done modifying the pointer
    // itself. This avoids a race if a lot of threads are all doing
    // frees at the same time and we wind up trying to really free this
    // pointer from another thread, while still trying to free it in
    // this function.
    pointer = PointerData::AddFreed(pointer, bytes);
    if (pointer != nullptr && g_debug->HeaderEnabled()) {
      pointer = g_debug->GetHeader(pointer)->orig_pointer;
    }
    result = TCALLVOID(free, pointer);
  } else {
    result = TCALLVOID(free, free_pointer);
  }

  return result;
}

void debug_free(void* pointer) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled() || pointer == nullptr) {
    return g_dispatch->free(pointer);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  if (!VerifyPointer(pointer, "free")) {
    return;
  }

  TimedResult result = InternalFree(pointer);

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(
        new FreeEntry(pointer, result.GetStartTimeNS(), result.GetEndTimeNS()));
  }
}

void* debug_memalign(size_t alignment, size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->memalign(alignment, bytes);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  if (bytes == 0) {
    bytes = 1;
  }

  if (bytes > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    return nullptr;
  }

  TimedResult result;
  void* pointer;
  if (g_debug->HeaderEnabled()) {
    // Make the alignment a power of two.
    if (!powerof2(alignment)) {
      alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }
    // Force the alignment to at least MINIMUM_ALIGNMENT_BYTES to guarantee
    // that the header is aligned properly.
    if (alignment < MINIMUM_ALIGNMENT_BYTES) {
      alignment = MINIMUM_ALIGNMENT_BYTES;
    }

    // We don't have any idea what the natural alignment of
    // the underlying native allocator is, so we always need to
    // over allocate.
    size_t real_size = alignment + bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }

    result = TCALL(malloc, real_size);
    pointer = result.getValue<void*>();
    if (pointer == nullptr) {
      return nullptr;
    }

    uintptr_t value = reinterpret_cast<uintptr_t>(pointer) + g_debug->pointer_offset();
    // Now align the pointer.
    value += (-value % alignment);

    Header* header = g_debug->GetHeader(reinterpret_cast<void*>(value));
    // Don't need to update `result` here because we only need the timestamps.
    pointer = InitHeader(header, pointer, bytes);
  } else {
    size_t real_size = bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }
    result = TCALL(memalign, alignment, real_size);
    pointer = result.getValue<void*>();
  }

  if (pointer != nullptr) {
    if (g_debug->TrackPointers()) {
      PointerData::Add(pointer, bytes);
    }

    if (g_debug->config().options() & FILL_ON_ALLOC) {
      size_t bytes = InternalMallocUsableSize(pointer);
      size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
      bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
      memset(pointer, g_debug->config().fill_alloc_value(), bytes);
    }

    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(new MemalignEntry(pointer, bytes, alignment,
                                                  result.GetStartTimeNS(), result.GetEndTimeNS()));
    }
  }

  return pointer;
}

void* debug_realloc(void* pointer, size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->realloc(pointer, bytes);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  if (pointer == nullptr) {
    TimedResult result = InternalMalloc(bytes);
    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(new ReallocEntry(result.getValue<void*>(), bytes, nullptr,
                                                 result.GetStartTimeNS(), result.GetEndTimeNS()));
    }
    pointer = result.getValue<void*>();
    return pointer;
  }

  if (!VerifyPointer(pointer, "realloc")) {
    return nullptr;
  }

  if (bytes == 0) {
    TimedResult result = InternalFree(pointer);

    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(new ReallocEntry(nullptr, bytes, pointer, result.GetStartTimeNS(),
                                                 result.GetEndTimeNS()));
    }

    return nullptr;
  }

  size_t real_size = bytes;
  if (g_debug->config().options() & EXPAND_ALLOC) {
    real_size += g_debug->config().expand_alloc_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }
  }

  if (bytes > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    return nullptr;
  }

  TimedResult result;
  void* new_pointer;
  size_t prev_size;
  if (g_debug->HeaderEnabled()) {
    // Same size, do nothing.
    Header* header = g_debug->GetHeader(pointer);
    if (real_size == header->size) {
      if (g_debug->TrackPointers()) {
        // Remove and re-add so that the backtrace is updated.
        PointerData::Remove(pointer);
        PointerData::Add(pointer, real_size);
      }
      return pointer;
    }

    // Allocation is shrinking.
    if (real_size < header->usable_size) {
      header->size = real_size;
      if (g_debug->config().options() & REAR_GUARD) {
        // Don't bother allocating a smaller pointer in this case, simply
        // change the header usable_size and reset the rear guard.
        header->usable_size = header->size;
        memset(g_debug->GetRearGuard(header), g_debug->config().rear_guard_value(),
               g_debug->config().rear_guard_bytes());
      }
      if (g_debug->TrackPointers()) {
        // Remove and re-add so that the backtrace is updated.
        PointerData::Remove(pointer);
        PointerData::Add(pointer, real_size);
      }
      return pointer;
    }

    // Allocate the new size.
    result = InternalMalloc(bytes);
    new_pointer = result.getValue<void*>();
    if (new_pointer == nullptr) {
      errno = ENOMEM;
      return nullptr;
    }

    prev_size = header->usable_size;
    memcpy(new_pointer, pointer, prev_size);
    TimedResult free_time = InternalFree(pointer);
    // `realloc` is split into two steps, so update the end time to the finish
    // time of the second operation.
    result.SetEndTimeNS(free_time.GetEndTimeNS());
  } else {
    if (g_debug->TrackPointers()) {
      PointerData::Remove(pointer);
    }

    prev_size = g_dispatch->malloc_usable_size(pointer);
    result = TCALL(realloc, pointer, real_size);
    new_pointer = result.getValue<void*>();
    if (new_pointer == nullptr) {
      return nullptr;
    }

    if (g_debug->TrackPointers()) {
      PointerData::Add(new_pointer, real_size);
    }
  }

  if (g_debug->config().options() & FILL_ON_ALLOC) {
    size_t bytes = InternalMallocUsableSize(new_pointer);
    if (bytes > g_debug->config().fill_on_alloc_bytes()) {
      bytes = g_debug->config().fill_on_alloc_bytes();
    }
    if (bytes > prev_size) {
      memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(new_pointer) + prev_size),
             g_debug->config().fill_alloc_value(), bytes - prev_size);
    }
  }

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(new ReallocEntry(new_pointer, bytes, pointer, result.GetStartTimeNS(),
                                               result.GetEndTimeNS()));
  }

  return new_pointer;
}

void* debug_calloc(size_t nmemb, size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->calloc(nmemb, bytes);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  size_t size;
  if (__builtin_mul_overflow(nmemb, bytes, &size)) {
    // Overflow
    errno = ENOMEM;
    return nullptr;
  }

  if (size == 0) {
    size = 1;
  }

  size_t real_size;
  if (__builtin_add_overflow(size, g_debug->extra_bytes(), &real_size)) {
    // Overflow.
    errno = ENOMEM;
    return nullptr;
  }

  if (real_size > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    return nullptr;
  }

  void* pointer;
  TimedResult result;
  if (g_debug->HeaderEnabled()) {
    // Need to guarantee the alignment of the header.
    result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
    Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
    if (header == nullptr) {
      return nullptr;
    }
    memset(header, 0, g_dispatch->malloc_usable_size(header));
    pointer = InitHeader(header, header, size);
  } else {
    result = TCALL(calloc, 1, real_size);
    pointer = result.getValue<void*>();
  }

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(
        new CallocEntry(pointer, nmemb, bytes, result.GetStartTimeNS(), result.GetEndTimeNS()));
  }

  if (pointer != nullptr && g_debug->TrackPointers()) {
    PointerData::Add(pointer, size);
  }
  return pointer;
}

struct mallinfo debug_mallinfo() {
  return g_dispatch->mallinfo();
}

int debug_mallopt(int param, int value) {
  return g_dispatch->mallopt(param, value);
}

int debug_malloc_info(int options, FILE* fp) {
  if (DebugCallsDisabled() || !g_debug->TrackPointers()) {
    return g_dispatch->malloc_info(options, fp);
  }

  // Make sure any pending output is written to the file.
  fflush(fp);

  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // Avoid any issues where allocations are made that will be freed
  // in the fclose.
  int fd = fileno(fp);
  MallocXmlElem root(fd, "malloc", "version=\"debug-malloc-1\"");
  std::vector<ListInfoType> list;
  PointerData::GetAllocList(&list);

  size_t alloc_num = 0;
  for (size_t i = 0; i < list.size(); i++) {
    MallocXmlElem alloc(fd, "allocation", "nr=\"%zu\"", alloc_num);

    size_t total = 1;
    size_t size = list[i].size;
    while (i < list.size() - 1 && list[i + 1].size == size) {
      i++;
      total++;
    }
    MallocXmlElem(fd, "size").Contents("%zu", list[i].size);
    MallocXmlElem(fd, "total").Contents("%zu", total);
    alloc_num++;
  }
  return 0;
}

void* debug_aligned_alloc(size_t alignment, size_t size) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->aligned_alloc(alignment, size);
  }
  if (!powerof2(alignment) || (size % alignment) != 0) {
    errno = EINVAL;
    return nullptr;
  }
  return debug_memalign(alignment, size);
}

int debug_posix_memalign(void** memptr, size_t alignment, size_t size) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->posix_memalign(memptr, alignment, size);
  }

  if (alignment < sizeof(void*) || !powerof2(alignment)) {
    return EINVAL;
  }
  int saved_errno = errno;
  *memptr = debug_memalign(alignment, size);
  errno = saved_errno;
  return (*memptr != nullptr) ? 0 : ENOMEM;
}

int debug_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*),
                  void* arg) {
  ScopedConcurrentLock lock;
  if (g_debug->TrackPointers()) {
    PointerData::IteratePointers([&callback, &arg](uintptr_t pointer) {
      callback(pointer, InternalMallocUsableSize(reinterpret_cast<void*>(pointer)), arg);
    });
    return 0;
  }

  // An option that adds a header will add pointer tracking, so no need to
  // check if headers are enabled.
  return g_dispatch->malloc_iterate(base, size, callback, arg);
}

void debug_malloc_disable() {
  ScopedConcurrentLock lock;
  g_dispatch->malloc_disable();
  if (g_debug->pointer) {
    g_debug->pointer->PrepareFork();
  }
}

void debug_malloc_enable() {
  ScopedConcurrentLock lock;
  if (g_debug->pointer) {
    g_debug->pointer->PostForkParent();
  }
  g_dispatch->malloc_enable();
}

ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t max_frames) {
  if (DebugCallsDisabled() || pointer == nullptr) {
    return 0;
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  if (!(g_debug->config().options() & BACKTRACE)) {
    return 0;
  }
  pointer = UntagPointer(pointer);
  return PointerData::GetFrames(pointer, frames, max_frames);
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* debug_pvalloc(size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->pvalloc(bytes);
  }

  size_t pagesize = getpagesize();
  size_t size = __BIONIC_ALIGN(bytes, pagesize);
  if (size < bytes) {
    // Overflow
    errno = ENOMEM;
    return nullptr;
  }
  return debug_memalign(pagesize, size);
}

void* debug_valloc(size_t size) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->valloc(size);
  }
  return debug_memalign(getpagesize(), size);
}
#endif

static std::mutex g_dump_lock;

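// Write the "Android Native Heap Dump" report: the tracked live allocations
// followed by a copy of /proc/self/maps so the addresses can be symbolized
// offline.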
static void write_dump(int fd) {
  dprintf(fd, "Android Native Heap Dump v1.2\n\n");

  std::string fingerprint = android::base::GetProperty("ro.build.fingerprint", "unknown");
  dprintf(fd, "Build fingerprint: '%s'\n\n", fingerprint.c_str());

  PointerData::DumpLiveToFile(fd);

  dprintf(fd, "MAPS\n");
  std::string content;
  if (!android::base::ReadFileToString("/proc/self/maps", &content)) {
    dprintf(fd, "Could not open /proc/self/maps\n");
  } else {
    dprintf(fd, "%s", content.c_str());
  }
  dprintf(fd, "END\n");

  // Purge the memory that was allocated and freed during this operation
  // since it can be large enough to expand the RSS significantly.
  g_dispatch->mallopt(M_PURGE_ALL, 0);
}

bool debug_write_malloc_leak_info(FILE* fp) {
  // Make sure any pending output is written to the file.
  fflush(fp);

  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  std::lock_guard<std::mutex> guard(g_dump_lock);

  if (!(g_debug->config().options() & BACKTRACE)) {
    return false;
  }

  write_dump(fileno(fp));

  return true;
}

void debug_dump_heap(const char* file_name) {
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  std::lock_guard<std::mutex> guard(g_dump_lock);

  int fd = open(file_name, O_RDWR | O_CREAT | O_NOFOLLOW | O_TRUNC | O_CLOEXEC, 0644);
  if (fd == -1) {
    error_log("Unable to create file: %s", file_name);
    return;
  }

  error_log("Dumping to file: %s\n", file_name);
  write_dump(fd);
  close(fd);
}