• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2012 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <errno.h>
30 #include <inttypes.h>
31 #include <malloc.h>
32 #include <pthread.h>
33 #include <signal.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/cdefs.h>
38 #include <sys/param.h>
39 #include <sys/syscall.h>
40 #include <unistd.h>
41 
42 #include <mutex>
43 #include <vector>
44 
45 #include <android-base/file.h>
46 #include <android-base/properties.h>
47 #include <android-base/stringprintf.h>
48 #include <bionic/malloc_tagged_pointers.h>
49 #include <platform/bionic/reserved_signals.h>
50 #include <private/MallocXmlElem.h>
51 #include <private/bionic_malloc_dispatch.h>
52 #include <unwindstack/Unwinder.h>
53 
54 #include "Config.h"
55 #include "DebugData.h"
56 #include "LogAllocatorStats.h"
57 #include "Nanotime.h"
58 #include "Unreachable.h"
59 #include "UnwindBacktrace.h"
60 #include "backtrace.h"
61 #include "debug_disable.h"
62 #include "debug_log.h"
63 #include "malloc_debug.h"
64 
65 // ------------------------------------------------------------------------
66 // Global Data
67 // ------------------------------------------------------------------------
// Global malloc debug state. Allocated in debug_initialize() and deliberately
// leaked at finalize time (see the comment at the end of debug_finalize()).
DebugData* g_debug;

// Points at libc's zygote-child flag; set in debug_initialize().
bool* g_zygote_child;

// Dispatch table of the real (wrapped) allocator implementation.
const MallocDispatch* g_dispatch;
namespace {
// A TimedResult holds the return value from one of the malloc et al.
// dispatch calls along with the start/end timestamps of that call.
// The timestamps are consumed by the RECORD_ALLOCS tracing code.
struct TimedResult {
  uint64_t start_ns = 0;
  uint64_t end_ns = 0;
  // The dispatched allocator functions return size_t, int or void*; only
  // the member matching the called function's return type is meaningful.
  union {
    size_t s;
    int i;
    void* p;
  } v;

  uint64_t GetStartTimeNS() const { return start_ns; }
  uint64_t GetEndTimeNS() const { return end_ns; }
  void SetStartTimeNS(uint64_t t) { start_ns = t; }
  void SetEndTimeNS(uint64_t t) { end_ns = t; }

  // Typed accessors for the union above, explicitly specialized for each
  // possible dispatch return type.
  template <typename T>
  void setValue(T);
  template <>
  void setValue(size_t s) {
    v.s = s;
  }
  template <>
  void setValue(int i) {
    v.i = i;
  }
  template <>
  void setValue(void* p) {
    v.p = p;
  }

  template <typename T>
  T getValue() const;
  template <>
  size_t getValue<size_t>() const {
    return v.s;
  }
  template <>
  int getValue<int>() const {
    return v.i;
  }
  template <>
  void* getValue<void*>() const {
    return v.p;
  }
};

// RAII timer: stamps the TimedResult's start time at construction and its
// end time at destruction.
class ScopedTimer {
 public:
  ScopedTimer(TimedResult& res) : res_(res) { res_.start_ns = Nanotime(); }

  ~ScopedTimer() { res_.end_ns = Nanotime(); }

 private:
  TimedResult& res_;
};

}  // namespace
133 
// Invokes the given MallocDispatch member function with the supplied
// arguments, timing the call only when the RECORD_ALLOCS option is enabled
// (avoids the Nanotime() overhead otherwise; in that case the timestamps
// stay at their default of 0). The call's return value is stored in the
// returned TimedResult.
template <typename MallocFn, typename... Args>
static TimedResult TimerCall(MallocFn fn, Args... args) {
  TimedResult ret;
  decltype((g_dispatch->*fn)(args...)) r;
  if (g_debug->config().options() & RECORD_ALLOCS) {
    ScopedTimer t(ret);
    r = (g_dispatch->*fn)(args...);
  } else {
    r = (g_dispatch->*fn)(args...);
  }
  ret.setValue<decltype(r)>(r);
  return ret;
}
147 
148 template <typename MallocFn, typename... Args>
TimerCallVoid(MallocFn fn,Args...args)149 static TimedResult TimerCallVoid(MallocFn fn, Args... args) {
150   TimedResult ret;
151   {
152     ScopedTimer t(ret);
153     (g_dispatch->*fn)(args...);
154   }
155   return ret;
156 }
157 
// Convenience wrappers for calling a dispatch-table entry with timing via
// TimerCall/TimerCallVoid. NOTE: the expansions end with a ';' so call
// sites are written as `result = TCALL(malloc, size)` without their own
// trailing semicolon being required.
#define TCALL(FUNC, ...) TimerCall(&MallocDispatch::FUNC, __VA_ARGS__);
#define TCALLVOID(FUNC, ...) TimerCallVoid(&MallocDispatch::FUNC, __VA_ARGS__);
160 
161 // ------------------------------------------------------------------------
162 
163 // ------------------------------------------------------------------------
164 // Use C style prototypes for all exported functions. This makes it easy
165 // to do dlsym lookups during libc initialization when malloc debug
166 // is enabled.
167 // ------------------------------------------------------------------------
__BEGIN_DECLS

// Exported entry points. They carry C linkage so they can be located by
// name via dlsym during libc initialization when malloc debug is enabled.
bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* malloc_zygote_child,
                      const char* options);
void debug_finalize();
void debug_dump_heap(const char* file_name);
void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
                                size_t* total_memory, size_t* backtrace_size);
bool debug_write_malloc_leak_info(FILE* fp);
ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
void debug_free_malloc_leak_info(uint8_t* info);
size_t debug_malloc_usable_size(void* pointer);
void* debug_malloc(size_t size);
void debug_free(void* pointer);
void* debug_aligned_alloc(size_t alignment, size_t size);
void* debug_memalign(size_t alignment, size_t bytes);
void* debug_realloc(void* pointer, size_t bytes);
void* debug_calloc(size_t nmemb, size_t bytes);
struct mallinfo debug_mallinfo();
int debug_mallopt(int param, int value);
int debug_malloc_info(int options, FILE* fp);
int debug_posix_memalign(void** memptr, size_t alignment, size_t size);
int debug_malloc_iterate(uintptr_t base, size_t size,
                         void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
void debug_malloc_disable();
void debug_malloc_enable();

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* debug_pvalloc(size_t bytes);
void* debug_valloc(size_t size);
#endif

__END_DECLS
201 // ------------------------------------------------------------------------
202 
// Reader/writer lock allowing malloc debug operations to run concurrently
// (each takes a read lock) while still letting shutdown block everything
// by taking the write lock via BlockAllOperations().
class ScopedConcurrentLock {
 public:
  ScopedConcurrentLock() {
    pthread_rwlock_rdlock(&lock_);
  }
  ~ScopedConcurrentLock() {
    pthread_rwlock_unlock(&lock_);
  }

  static void Init() {
    pthread_rwlockattr_t attr;
    // POSIX requires the attribute object to be initialized before use;
    // the previous code passed an uninitialized attr to setkind.
    pthread_rwlockattr_init(&attr);
    // Set the attribute so that when a write lock is pending, read locks are no
    // longer granted.
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    pthread_rwlock_init(&lock_, &attr);
    pthread_rwlockattr_destroy(&attr);
  }

  // Takes the write lock and intentionally never releases it: every
  // subsequent malloc debug operation blocks. Used during finalization.
  static void BlockAllOperations() {
    pthread_rwlock_wrlock(&lock_);
  }

 private:
  static pthread_rwlock_t lock_;
};
pthread_rwlock_t ScopedConcurrentLock::lock_;
228 
// Use this because the sigprocmask* functions filter out the reserved bionic
// signals including the signal this code blocks.
// Calls the rt_sigprocmask syscall directly so BIONIC_SIGNAL_BACKTRACE can
// actually be added to / removed from the signal mask.
static inline int __rt_sigprocmask(int how, const sigset64_t* new_set, sigset64_t* old_set,
                                   size_t sigset_size) {
  return syscall(SYS_rt_sigprocmask, how, new_set, old_set, sigset_size);
}
235 
// Need to block the backtrace signal while in malloc debug routines
// otherwise there is a chance of a deadlock and timeout when unwinding.
// This can occur if a thread is paused while owning a malloc debug
// internal lock.
class ScopedBacktraceSignalBlocker {
 public:
  ScopedBacktraceSignalBlocker() {
    sigemptyset64(&backtrace_set_);
    sigaddset64(&backtrace_set_, BIONIC_SIGNAL_BACKTRACE);
    sigset64_t old_set;
    __rt_sigprocmask(SIG_BLOCK, &backtrace_set_, &old_set, sizeof(backtrace_set_));
    // If the signal was already blocked before this scope, leave it blocked
    // when this object is destroyed.
    if (sigismember64(&old_set, BIONIC_SIGNAL_BACKTRACE)) {
      unblock_ = false;
    }
  }

  ~ScopedBacktraceSignalBlocker() {
    if (unblock_) {
      __rt_sigprocmask(SIG_UNBLOCK, &backtrace_set_, nullptr, sizeof(backtrace_set_));
    }
  }

 private:
  bool unblock_ = true;
  sigset64_t backtrace_set_;
};
262 
InitAtfork()263 static void InitAtfork() {
264   static pthread_once_t atfork_init = PTHREAD_ONCE_INIT;
265   pthread_once(&atfork_init, []() {
266     pthread_atfork(
267         []() {
268           if (g_debug != nullptr) {
269             g_debug->PrepareFork();
270           }
271         },
272         []() {
273           if (g_debug != nullptr) {
274             g_debug->PostForkParent();
275           }
276         },
277         []() {
278           if (g_debug != nullptr) {
279             g_debug->PostForkChild();
280           }
281         });
282   });
283 }
284 
BacktraceAndLog()285 void BacktraceAndLog() {
286   if (g_debug->config().options() & BACKTRACE_FULL) {
287     std::vector<uintptr_t> frames;
288     std::vector<unwindstack::FrameData> frames_info;
289     if (!Unwind(&frames, &frames_info, 256)) {
290       error_log("  Backtrace failed to get any frames.");
291     } else {
292       UnwindLog(frames_info);
293     }
294   } else {
295     std::vector<uintptr_t> frames(256);
296     size_t num_frames = backtrace_get(frames.data(), frames.size());
297     if (num_frames == 0) {
298       error_log("  Backtrace failed to get any frames.");
299     } else {
300       backtrace_log(frames.data(), num_frames);
301     }
302   }
303 }
304 
// Logs a framed error report for a bad allocation pointer: the error line,
// an optional freed-pointer backtrace, and the current call stack. Aborts
// afterwards when the abort_on_error option is set.
static void LogError(const void* pointer, const char* error_str) {
  error_log(LOG_DIVIDER);
  error_log("+++ ALLOCATION %p %s", pointer, error_str);

  // If we are tracking already freed pointers, check to see if this is
  // one so we can print extra information.
  if (g_debug->config().options() & FREE_TRACK) {
    PointerData::LogFreeBacktrace(pointer);
  }

  error_log("Backtrace at time of failure:");
  BacktraceAndLog();
  error_log(LOG_DIVIDER);
  if (g_debug->config().options() & ABORT_ON_ERROR) {
    abort();
  }
}
322 
VerifyPointer(const void * pointer,const char * function_name)323 static bool VerifyPointer(const void* pointer, const char* function_name) {
324   if (g_debug->HeaderEnabled()) {
325     Header* header = g_debug->GetHeader(pointer);
326     if (header->tag != DEBUG_TAG) {
327       std::string error_str;
328       if (header->tag == DEBUG_FREE_TAG) {
329         error_str = std::string("USED AFTER FREE (") + function_name + ")";
330       } else {
331         error_str = android::base::StringPrintf("HAS INVALID TAG %" PRIx32 " (%s)", header->tag,
332                                                 function_name);
333       }
334       LogError(pointer, error_str.c_str());
335       return false;
336     }
337   }
338 
339   if (g_debug->TrackPointers()) {
340     if (!PointerData::Exists(pointer)) {
341       std::string error_str(std::string("UNKNOWN POINTER (") + function_name + ")");
342       LogError(pointer, error_str.c_str());
343       return false;
344     }
345   }
346   return true;
347 }
348 
InternalMallocUsableSize(void * pointer)349 static size_t InternalMallocUsableSize(void* pointer) {
350   if (g_debug->HeaderEnabled()) {
351     return g_debug->GetHeader(pointer)->usable_size;
352   } else {
353     return g_dispatch->malloc_usable_size(pointer);
354   }
355 }
356 
// Fills in the debug header for a new allocation and paints the front/rear
// guard regions. `orig_pointer` is the pointer returned by the underlying
// allocator; returns the pointer to hand back to the caller, or nullptr
// (after freeing orig_pointer) if the allocator reports a usable size of 0.
static void* InitHeader(Header* header, void* orig_pointer, size_t size) {
  header->tag = DEBUG_TAG;
  header->orig_pointer = orig_pointer;
  header->size = size;
  header->usable_size = g_dispatch->malloc_usable_size(orig_pointer);
  if (header->usable_size == 0) {
    g_dispatch->free(orig_pointer);
    return nullptr;
  }
  // Subtract the bytes consumed in front of the caller-visible pointer:
  // the pointer offset plus any gap between orig_pointer and the header.
  header->usable_size -= g_debug->pointer_offset() + reinterpret_cast<uintptr_t>(header) -
                         reinterpret_cast<uintptr_t>(orig_pointer);

  if (g_debug->config().options() & FRONT_GUARD) {
    uint8_t* guard = g_debug->GetFrontGuard(header);
    memset(guard, g_debug->config().front_guard_value(), g_debug->config().front_guard_bytes());
  }

  if (g_debug->config().options() & REAR_GUARD) {
    uint8_t* guard = g_debug->GetRearGuard(header);
    memset(guard, g_debug->config().rear_guard_value(), g_debug->config().rear_guard_bytes());
    // If the rear guard is enabled, set the usable size to the exact size
    // of the allocation.
    header->usable_size = header->size;
  }

  return g_debug->GetPointer(header);
}
384 
// Weak reference to ASan's initializer; resolves to non-null only when the
// process is built with ASan (see the check in debug_initialize below).
extern "C" void __asan_init() __attribute__((weak));
386 
debug_initialize(const MallocDispatch * malloc_dispatch,bool * zygote_child,const char * options)387 bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* zygote_child,
388                       const char* options) {
389   if (zygote_child == nullptr || options == nullptr) {
390     return false;
391   }
392 
393   if (__asan_init != 0) {
394     error_log("malloc debug cannot be enabled alongside ASAN");
395     return false;
396   }
397 
398   InitAtfork();
399 
400   g_zygote_child = zygote_child;
401 
402   g_dispatch = malloc_dispatch;
403 
404   if (!DebugDisableInitialize()) {
405     return false;
406   }
407 
408   DebugData* debug = new DebugData();
409   if (!debug->Initialize(options) || !Unreachable::Initialize(debug->config())) {
410     delete debug;
411     DebugDisableFinalize();
412     return false;
413   }
414   g_debug = debug;
415 
416   // Always enable the backtrace code since we will use it in a number
417   // of different error cases.
418   backtrace_startup();
419 
420   if (g_debug->config().options() & VERBOSE) {
421     info_log("%s: malloc debug enabled", getprogname());
422   }
423 
424   ScopedConcurrentLock::Init();
425 
426   return true;
427 }
428 
// Tears down malloc debug: blocks all concurrent operations, runs the
// configured exit-time reports (free verification, leak logging, record
// dumps, heap dumps, allocator stats), then shuts down the unwinder.
void debug_finalize() {
  if (g_debug == nullptr) {
    return;
  }

  // Make sure that there are no other threads doing debug allocations
  // before we kill everything.
  ScopedConcurrentLock::BlockAllOperations();

  // Turn off capturing allocations calls.
  DebugDisableSet(true);

  if (g_debug->config().options() & FREE_TRACK) {
    PointerData::VerifyAllFreed();
  }

  if (g_debug->config().options() & LEAK_TRACK) {
    PointerData::LogLeaks();
  }

  if ((g_debug->config().options() & RECORD_ALLOCS) && g_debug->config().record_allocs_on_exit()) {
    RecordData::WriteEntriesOnExit();
  }

  if ((g_debug->config().options() & BACKTRACE) && g_debug->config().backtrace_dump_on_exit()) {
    debug_dump_heap(android::base::StringPrintf("%s.%d.exit.txt",
                                                g_debug->config().backtrace_dump_prefix().c_str(),
                                                getpid()).c_str());
  }

  if (g_debug->config().options() & LOG_ALLOCATOR_STATS_ON_EXIT) {
    LogAllocatorStats::Log();
  }

  backtrace_shutdown();

  // In order to prevent any issues of threads freeing previous pointers
  // after the main thread calls this code, simply leak the g_debug pointer
  // and do not destroy the debug disable pthread key.
}
469 
// Fills the out-parameters with a snapshot of the tracked allocations
// (see PointerData::GetInfo). All out-parameters are zeroed first; the
// 'backtrace' option must be enabled for any data to be produced.
void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
                                size_t* total_memory, size_t* backtrace_size) {
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // Verify the arguments.
  if (info == nullptr || overall_size == nullptr || info_size == nullptr || total_memory == nullptr ||
      backtrace_size == nullptr) {
    error_log("get_malloc_leak_info: At least one invalid parameter.");
    return;
  }

  *info = nullptr;
  *overall_size = 0;
  *info_size = 0;
  *total_memory = 0;
  *backtrace_size = 0;

  if (!(g_debug->config().options() & BACKTRACE)) {
    error_log(
        "get_malloc_leak_info: Allocations not being tracked, to enable "
        "set the option 'backtrace'.");
    return;
  }

  PointerData::GetInfo(info, overall_size, info_size, total_memory, backtrace_size);
}
498 
// Releases a buffer previously returned via debug_get_malloc_leak_info().
void debug_free_malloc_leak_info(uint8_t* info) {
  g_dispatch->free(info);
  // Purge the memory that was freed since a significant amount of
  // memory could have been allocated and freed.
  g_dispatch->mallopt(M_PURGE_ALL, 0);
}
505 
// Exported malloc_usable_size replacement. Verifies the pointer first and
// returns 0 for a pointer malloc debug does not recognize.
size_t debug_malloc_usable_size(void* pointer) {
  Unreachable::CheckIfRequested(g_debug->config());

  // Fast path: fall through to the real allocator when debug calls are
  // disabled (or for the nullptr query, which needs no verification).
  if (DebugCallsDisabled() || pointer == nullptr) {
    return g_dispatch->malloc_usable_size(pointer);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  if (!VerifyPointer(pointer, "malloc_usable_size")) {
    return 0;
  }

  return InternalMallocUsableSize(pointer);
}
522 
// Performs the actual debug allocation for malloc (and the malloc half of
// realloc): adds the configured extra bytes, initializes the header when
// enabled, registers the pointer for tracking and applies fill-on-alloc.
// Returns the resulting pointer plus timing in a TimedResult.
static TimedResult InternalMalloc(size_t size) {
  uint64_t options = g_debug->config().options();
  // Periodic heap dump triggered by the backtrace option's counter.
  if ((options & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
    debug_dump_heap(android::base::StringPrintf(
                        "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
                        .c_str());
  }
  if (options & LOG_ALLOCATOR_STATS_ON_SIGNAL) {
    LogAllocatorStats::CheckIfShouldLog();
  }

  // malloc(0) is treated as a 1-byte allocation so a unique pointer is
  // always returned.
  if (size == 0) {
    size = 1;
  }

  TimedResult result;

  size_t real_size = size + g_debug->extra_bytes();
  if (real_size < size) {
    // Overflow.
    errno = ENOMEM;
    result.setValue<void*>(nullptr);
    return result;
  }

  if (size > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    result.setValue<void*>(nullptr);
    return result;
  }

  if (g_debug->HeaderEnabled()) {
    // Allocate aligned so the header itself is properly aligned.
    result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
    Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
    if (header == nullptr) {
      return result;
    }
    result.setValue<void*>(InitHeader(header, header, size));
  } else {
    result = TCALL(malloc, real_size);
  }

  void* pointer = result.getValue<void*>();

  if (pointer != nullptr) {
    if (g_debug->TrackPointers()) {
      PointerData::Add(pointer, size);
    }

    if (g_debug->config().options() & FILL_ON_ALLOC) {
      // Fill at most fill_on_alloc_bytes of the usable space.
      size_t bytes = InternalMallocUsableSize(pointer);
      size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
      bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
      memset(pointer, g_debug->config().fill_alloc_value(), bytes);
    }
  }

  return result;
}
582 
// Exported malloc replacement: delegates to InternalMalloc and, when
// RECORD_ALLOCS is enabled, records a MALLOC trace entry with timing.
void* debug_malloc(size_t size) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->malloc(size);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  memory_trace::Entry* entry = nullptr;
  if (g_debug->config().options() & RECORD_ALLOCS) {
    // In order to preserve the order of operations, reserve the entry before
    // performing the operation.
    entry = g_debug->record->ReserveEntry();
  }

  TimedResult result = InternalMalloc(size);

  if (entry != nullptr) {
    *entry = memory_trace::Entry{.tid = gettid(),
                                 .type = memory_trace::MALLOC,
                                 .ptr = reinterpret_cast<uint64_t>(result.getValue<void*>()),
                                 .size = size,
                                 .start_ns = result.GetStartTimeNS(),
                                 .end_ns = result.GetEndTimeNS()};
  }

  return result.getValue<void*>();
}
613 
// Performs the actual debug free for free (and the free half of realloc):
// validates guards, marks the header as freed, applies fill-on-free,
// unregisters the pointer and releases the memory (possibly deferred via
// the free-track list). Returns the timing of the underlying free call.
static TimedResult InternalFree(void* pointer) {
  uint64_t options = g_debug->config().options();
  // Periodic heap dump triggered by the backtrace option's counter.
  if ((options & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
    debug_dump_heap(android::base::StringPrintf(
                        "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
                        .c_str());
  }
  if (options & LOG_ALLOCATOR_STATS_ON_SIGNAL) {
    LogAllocatorStats::CheckIfShouldLog();
  }

  void* free_pointer = pointer;
  size_t bytes;
  Header* header;
  if (g_debug->HeaderEnabled()) {
    header = g_debug->GetHeader(pointer);
    // The real allocator must be given the original allocation pointer,
    // not the caller-visible one.
    free_pointer = header->orig_pointer;

    // Check both guard regions before the memory goes away.
    if (g_debug->config().options() & FRONT_GUARD) {
      if (!g_debug->front_guard->Valid(header)) {
        g_debug->front_guard->LogFailure(header);
      }
    }
    if (g_debug->config().options() & REAR_GUARD) {
      if (!g_debug->rear_guard->Valid(header)) {
        g_debug->rear_guard->LogFailure(header);
      }
    }

    // Mark the header so a later use of this pointer is reported as
    // use-after-free.
    header->tag = DEBUG_FREE_TAG;

    bytes = header->usable_size;
  } else {
    bytes = g_dispatch->malloc_usable_size(pointer);
  }

  if (g_debug->config().options() & FILL_ON_FREE) {
    size_t fill_bytes = g_debug->config().fill_on_free_bytes();
    fill_bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
    memset(pointer, g_debug->config().fill_free_value(), fill_bytes);
  }

  if (g_debug->TrackPointers()) {
    PointerData::Remove(pointer);
  }

  TimedResult result;
  if (g_debug->config().options() & FREE_TRACK) {
    // Do not add the allocation until we are done modifying the pointer
    // itself. This avoids a race if a lot of threads are all doing
    // frees at the same time and we wind up trying to really free this
    // pointer from another thread, while still trying to free it in
    // this function.
    pointer = PointerData::AddFreed(pointer, bytes);
    // AddFreed may hand back a different (presumably evicted) pointer to
    // really free, or nullptr if nothing should be freed yet — TODO confirm
    // against PointerData::AddFreed.
    if (pointer != nullptr && g_debug->HeaderEnabled()) {
      pointer = g_debug->GetHeader(pointer)->orig_pointer;
    }
    result = TCALLVOID(free, pointer);
  } else {
    result = TCALLVOID(free, free_pointer);
  }

  return result;
}
678 
debug_free(void * pointer)679 void debug_free(void* pointer) {
680   Unreachable::CheckIfRequested(g_debug->config());
681 
682   if (DebugCallsDisabled() || pointer == nullptr) {
683     return g_dispatch->free(pointer);
684   }
685 
686   size_t size;
687   if (g_debug->config().options() & RECORD_ALLOCS) {
688     // Need to get the size before disabling debug calls.
689     size = debug_malloc_usable_size(pointer);
690   }
691 
692   ScopedConcurrentLock lock;
693   ScopedDisableDebugCalls disable;
694   ScopedBacktraceSignalBlocker blocked;
695 
696   if (!VerifyPointer(pointer, "free")) {
697     return;
698   }
699 
700   int64_t present_bytes = -1;
701   memory_trace::Entry* entry = nullptr;
702   if (g_debug->config().options() & RECORD_ALLOCS) {
703     // In order to preserve the order of operations, reserve the entry before
704     // performing the operation.
705     entry = g_debug->record->ReserveEntry();
706 
707     // Need to get the present bytes before the pointer is freed in case the
708     // memory is released during the free call.
709     present_bytes = g_debug->record->GetPresentBytes(pointer, size);
710   }
711 
712   TimedResult result = InternalFree(pointer);
713 
714   if (entry != nullptr) {
715     *entry = memory_trace::Entry{.tid = gettid(),
716                                  .type = memory_trace::FREE,
717                                  .ptr = reinterpret_cast<uint64_t>(pointer),
718                                  .present_bytes = present_bytes,
719                                  .start_ns = result.GetStartTimeNS(),
720                                  .end_ns = result.GetEndTimeNS()};
721   }
722 }
723 
debug_memalign(size_t alignment,size_t bytes)724 void* debug_memalign(size_t alignment, size_t bytes) {
725   Unreachable::CheckIfRequested(g_debug->config());
726 
727   if (DebugCallsDisabled()) {
728     return g_dispatch->memalign(alignment, bytes);
729   }
730   ScopedConcurrentLock lock;
731   ScopedDisableDebugCalls disable;
732   ScopedBacktraceSignalBlocker blocked;
733 
734   if (bytes == 0) {
735     bytes = 1;
736   }
737 
738   if (bytes > PointerInfoType::MaxSize()) {
739     errno = ENOMEM;
740     return nullptr;
741   }
742 
743   memory_trace::Entry* entry = nullptr;
744   if (g_debug->config().options() & RECORD_ALLOCS) {
745     // In order to preserve the order of operations, reserve the entry before
746     // performing the operation.
747     entry = g_debug->record->ReserveEntry();
748   }
749 
750   TimedResult result;
751   void* pointer;
752   if (g_debug->HeaderEnabled()) {
753     // Make the alignment a power of two.
754     if (!powerof2(alignment)) {
755       alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
756     }
757     // Force the alignment to at least MINIMUM_ALIGNMENT_BYTES to guarantee
758     // that the header is aligned properly.
759     if (alignment < MINIMUM_ALIGNMENT_BYTES) {
760       alignment = MINIMUM_ALIGNMENT_BYTES;
761     }
762 
763     // We don't have any idea what the natural alignment of
764     // the underlying native allocator is, so we always need to
765     // over allocate.
766     size_t real_size = alignment + bytes + g_debug->extra_bytes();
767     if (real_size < bytes) {
768       // Overflow.
769       errno = ENOMEM;
770       return nullptr;
771     }
772 
773     result = TCALL(malloc, real_size);
774     pointer = result.getValue<void*>();
775     if (pointer == nullptr) {
776       return nullptr;
777     }
778 
779     uintptr_t value = reinterpret_cast<uintptr_t>(pointer) + g_debug->pointer_offset();
780     // Now align the pointer.
781     value += (-value % alignment);
782 
783     Header* header = g_debug->GetHeader(reinterpret_cast<void*>(value));
784     // Don't need to update `result` here because we only need the timestamps.
785     pointer = InitHeader(header, pointer, bytes);
786   } else {
787     size_t real_size = bytes + g_debug->extra_bytes();
788     if (real_size < bytes) {
789       // Overflow.
790       errno = ENOMEM;
791       return nullptr;
792     }
793     result = TCALL(memalign, alignment, real_size);
794     pointer = result.getValue<void*>();
795   }
796 
797   if (pointer == nullptr) {
798     return nullptr;
799   }
800 
801   if (g_debug->TrackPointers()) {
802     PointerData::Add(pointer, bytes);
803   }
804 
805   if (g_debug->config().options() & FILL_ON_ALLOC) {
806     size_t bytes = InternalMallocUsableSize(pointer);
807     size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
808     bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
809     memset(pointer, g_debug->config().fill_alloc_value(), bytes);
810   }
811 
812   if (entry != nullptr) {
813     *entry = memory_trace::Entry{.tid = gettid(),
814                                  .type = memory_trace::MEMALIGN,
815                                  .ptr = reinterpret_cast<uint64_t>(pointer),
816                                  .size = bytes,
817                                  .u.align = alignment,
818                                  .start_ns = result.GetStartTimeNS(),
819                                  .end_ns = result.GetEndTimeNS()};
820   }
821 
822   return pointer;
823 }
824 
debug_realloc(void * pointer,size_t bytes)825 void* debug_realloc(void* pointer, size_t bytes) {
826   Unreachable::CheckIfRequested(g_debug->config());
827 
828   if (DebugCallsDisabled()) {
829     return g_dispatch->realloc(pointer, bytes);
830   }
831 
832   size_t old_size;
833   if (pointer != nullptr && g_debug->config().options() & RECORD_ALLOCS) {
834     // Need to get the size before disabling debug calls.
835     old_size = debug_malloc_usable_size(pointer);
836   }
837 
838   ScopedConcurrentLock lock;
839   ScopedDisableDebugCalls disable;
840   ScopedBacktraceSignalBlocker blocked;
841 
842   memory_trace::Entry* entry = nullptr;
843   if (g_debug->config().options() & RECORD_ALLOCS) {
844     // In order to preserve the order of operations, reserve the entry before
845     // performing the operation.
846     entry = g_debug->record->ReserveEntry();
847   }
848 
849   if (pointer == nullptr) {
850     TimedResult result = InternalMalloc(bytes);
851     pointer = result.getValue<void*>();
852     if (entry != nullptr) {
853       *entry = memory_trace::Entry{.tid = gettid(),
854                                    .type = memory_trace::REALLOC,
855                                    .ptr = reinterpret_cast<uint64_t>(pointer),
856                                    .size = bytes,
857                                    .u.old_ptr = 0,
858                                    .start_ns = result.GetStartTimeNS(),
859                                    .end_ns = result.GetEndTimeNS()};
860     }
861     return pointer;
862   }
863 
864   if (!VerifyPointer(pointer, "realloc")) {
865     return nullptr;
866   }
867 
868   int64_t present_bytes = -1;
869   if (g_debug->config().options() & RECORD_ALLOCS) {
870     // Need to get the present bytes before the pointer is freed in case the
871     // memory is released during the free call.
872     present_bytes = g_debug->record->GetPresentBytes(pointer, old_size);
873   }
874 
875   if (bytes == 0) {
876     TimedResult result = InternalFree(pointer);
877 
878     if (entry != nullptr) {
879       *entry = memory_trace::Entry{.tid = gettid(),
880                                    .type = memory_trace::REALLOC,
881                                    .ptr = 0,
882                                    .size = 0,
883                                    .u.old_ptr = reinterpret_cast<uint64_t>(pointer),
884                                    .present_bytes = present_bytes,
885                                    .start_ns = result.GetStartTimeNS(),
886                                    .end_ns = result.GetEndTimeNS()};
887     }
888 
889     return nullptr;
890   }
891 
892   size_t real_size = bytes;
893   if (g_debug->config().options() & EXPAND_ALLOC) {
894     real_size += g_debug->config().expand_alloc_bytes();
895     if (real_size < bytes) {
896       // Overflow.
897       errno = ENOMEM;
898       return nullptr;
899     }
900   }
901 
902   if (bytes > PointerInfoType::MaxSize()) {
903     errno = ENOMEM;
904     return nullptr;
905   }
906 
907   TimedResult result;
908   void* new_pointer;
909   size_t prev_size;
910   if (g_debug->HeaderEnabled()) {
911     // Same size, do nothing.
912     Header* header = g_debug->GetHeader(pointer);
913     if (real_size == header->size) {
914       if (g_debug->TrackPointers()) {
915         // Remove and re-add so that the backtrace is updated.
916         PointerData::Remove(pointer);
917         PointerData::Add(pointer, real_size);
918       }
919       return pointer;
920     }
921 
922     // Allocation is shrinking.
923     if (real_size < header->usable_size) {
924       header->size = real_size;
925       if (g_debug->config().options() & REAR_GUARD) {
926         // Don't bother allocating a smaller pointer in this case, simply
927         // change the header usable_size and reset the rear guard.
928         header->usable_size = header->size;
929         memset(g_debug->GetRearGuard(header), g_debug->config().rear_guard_value(),
930                g_debug->config().rear_guard_bytes());
931       }
932       if (g_debug->TrackPointers()) {
933         // Remove and re-add so that the backtrace is updated.
934         PointerData::Remove(pointer);
935         PointerData::Add(pointer, real_size);
936       }
937       return pointer;
938     }
939 
940     // Allocate the new size.
941     result = InternalMalloc(bytes);
942     new_pointer = result.getValue<void*>();
943     if (new_pointer == nullptr) {
944       errno = ENOMEM;
945       return nullptr;
946     }
947 
948     prev_size = header->usable_size;
949     memcpy(new_pointer, pointer, prev_size);
950     TimedResult free_time = InternalFree(pointer);
951     // `realloc` is split into two steps, update the end time to the finish time
952     // of the second operation.
953     result.SetEndTimeNS(free_time.GetEndTimeNS());
954   } else {
955     if (g_debug->TrackPointers()) {
956       PointerData::Remove(pointer);
957     }
958 
959     prev_size = g_dispatch->malloc_usable_size(pointer);
960     result = TCALL(realloc, pointer, real_size);
961     new_pointer = result.getValue<void*>();
962     if (new_pointer == nullptr) {
963       return nullptr;
964     }
965 
966     if (g_debug->TrackPointers()) {
967       PointerData::Add(new_pointer, real_size);
968     }
969   }
970 
971   if (g_debug->config().options() & FILL_ON_ALLOC) {
972     size_t bytes = InternalMallocUsableSize(new_pointer);
973     if (bytes > g_debug->config().fill_on_alloc_bytes()) {
974       bytes = g_debug->config().fill_on_alloc_bytes();
975     }
976     if (bytes > prev_size) {
977       memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(new_pointer) + prev_size),
978              g_debug->config().fill_alloc_value(), bytes - prev_size);
979     }
980   }
981 
982   if (entry != nullptr) {
983     *entry = memory_trace::Entry{.tid = gettid(),
984                                  .type = memory_trace::REALLOC,
985                                  .ptr = reinterpret_cast<uint64_t>(new_pointer),
986                                  .size = bytes,
987                                  .u.old_ptr = reinterpret_cast<uint64_t>(pointer),
988                                  .present_bytes = present_bytes,
989                                  .start_ns = result.GetStartTimeNS(),
990                                  .end_ns = result.GetEndTimeNS()};
991   }
992 
993   return new_pointer;
994 }
995 
// Debug wrapper for calloc(3). Checks for size overflow, optionally adds
// space for the debug header/guard bytes, records the allocation when
// tracing is enabled, and registers the pointer for leak/free tracking.
void* debug_calloc(size_t nmemb, size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->calloc(nmemb, bytes);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // calloc must fail cleanly if nmemb * bytes does not fit in a size_t.
  size_t size;
  if (__builtin_mul_overflow(nmemb, bytes, &size)) {
    // Overflow
    errno = ENOMEM;
    return nullptr;
  }

  // Treat a zero-byte request as a one-byte allocation so a unique,
  // freeable pointer is still returned.
  if (size == 0) {
    size = 1;
  }

  // Account for the extra bytes consumed by the debug header/guard options.
  size_t real_size;
  if (__builtin_add_overflow(size, g_debug->extra_bytes(), &real_size)) {
    // Overflow.
    errno = ENOMEM;
    return nullptr;
  }

  if (real_size > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    return nullptr;
  }

  memory_trace::Entry* entry = nullptr;
  if (g_debug->config().options() & RECORD_ALLOCS) {
    // In order to preserve the order of operations, reserve the entry before
    // performing the operation.
    entry = g_debug->record->ReserveEntry();
  }

  void* pointer;
  TimedResult result;
  if (g_debug->HeaderEnabled()) {
    // Need to guarantee the alignment of the header.
    result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
    Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
    if (header == nullptr) {
      return nullptr;
    }
    // Preserve calloc semantics: zero the whole usable region before the
    // header and guards are initialized on top of it.
    memset(header, 0, g_dispatch->malloc_usable_size(header));
    pointer = InitHeader(header, header, size);
  } else {
    result = TCALL(calloc, 1, real_size);
    pointer = result.getValue<void*>();
  }

  if (entry != nullptr) {
    // Record the caller-visible nmemb/bytes values, not the adjusted size.
    *entry = memory_trace::Entry{.tid = gettid(),
                                 .type = memory_trace::CALLOC,
                                 .ptr = reinterpret_cast<uint64_t>(pointer),
                                 .size = bytes,
                                 .u.n_elements = nmemb,
                                 .start_ns = result.GetStartTimeNS(),
                                 .end_ns = result.GetEndTimeNS()};
  }

  if (pointer != nullptr && g_debug->TrackPointers()) {
    PointerData::Add(pointer, size);
  }
  return pointer;
}
1067 
// Pass-through to the real allocator's mallinfo(); debug mode adds nothing.
struct mallinfo debug_mallinfo() {
  return g_dispatch->mallinfo();
}
1071 
// Pass-through to the real allocator's mallopt().
int debug_mallopt(int param, int value) {
  return g_dispatch->mallopt(param, value);
}
1075 
debug_malloc_info(int options,FILE * fp)1076 int debug_malloc_info(int options, FILE* fp) {
1077   if (DebugCallsDisabled() || !g_debug->TrackPointers()) {
1078     return g_dispatch->malloc_info(options, fp);
1079   }
1080 
1081   // Make sure any pending output is written to the file.
1082   fflush(fp);
1083 
1084   ScopedConcurrentLock lock;
1085   ScopedDisableDebugCalls disable;
1086   ScopedBacktraceSignalBlocker blocked;
1087 
1088   // Avoid any issues where allocations are made that will be freed
1089   // in the fclose.
1090   int fd = fileno(fp);
1091   MallocXmlElem root(fd, "malloc", "version=\"debug-malloc-1\"");
1092   std::vector<ListInfoType> list;
1093   PointerData::GetAllocList(&list);
1094 
1095   size_t alloc_num = 0;
1096   for (size_t i = 0; i < list.size(); i++) {
1097     MallocXmlElem alloc(fd, "allocation", "nr=\"%zu\"", alloc_num);
1098 
1099     size_t total = 1;
1100     size_t size = list[i].size;
1101     while (i < list.size() - 1 && list[i + 1].size == size) {
1102       i++;
1103       total++;
1104     }
1105     MallocXmlElem(fd, "size").Contents("%zu", list[i].size);
1106     MallocXmlElem(fd, "total").Contents("%zu", total);
1107     alloc_num++;
1108   }
1109   return 0;
1110 }
1111 
debug_aligned_alloc(size_t alignment,size_t size)1112 void* debug_aligned_alloc(size_t alignment, size_t size) {
1113   Unreachable::CheckIfRequested(g_debug->config());
1114 
1115   if (DebugCallsDisabled()) {
1116     return g_dispatch->aligned_alloc(alignment, size);
1117   }
1118   if (!powerof2(alignment) || (size % alignment) != 0) {
1119     errno = EINVAL;
1120     return nullptr;
1121   }
1122   return debug_memalign(alignment, size);
1123 }
1124 
debug_posix_memalign(void ** memptr,size_t alignment,size_t size)1125 int debug_posix_memalign(void** memptr, size_t alignment, size_t size) {
1126   Unreachable::CheckIfRequested(g_debug->config());
1127 
1128   if (DebugCallsDisabled()) {
1129     return g_dispatch->posix_memalign(memptr, alignment, size);
1130   }
1131 
1132   if (alignment < sizeof(void*) || !powerof2(alignment)) {
1133     return EINVAL;
1134   }
1135   int saved_errno = errno;
1136   *memptr = debug_memalign(alignment, size);
1137   errno = saved_errno;
1138   return (*memptr != nullptr) ? 0 : ENOMEM;
1139 }
1140 
debug_malloc_iterate(uintptr_t base,size_t size,void (* callback)(uintptr_t,size_t,void *),void * arg)1141 int debug_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*),
1142                   void* arg) {
1143   ScopedConcurrentLock lock;
1144   if (g_debug->TrackPointers()) {
1145     PointerData::IteratePointers([&callback, &arg](uintptr_t pointer) {
1146       callback(pointer, InternalMallocUsableSize(reinterpret_cast<void*>(pointer)), arg);
1147     });
1148     return 0;
1149   }
1150 
1151   // An option that adds a header will add pointer tracking, so no need to
1152   // check if headers are enabled.
1153   return g_dispatch->malloc_iterate(base, size, callback, arg);
1154 }
1155 
// Quiesces the allocator (e.g. around fork). The pointer locks must be taken
// before the underlying allocator is disabled to preserve lock ordering.
void debug_malloc_disable() {
  ScopedConcurrentLock lock;
  if (g_debug->pointer) {
    // Acquire the pointer locks first, otherwise, the code can be holding
    // the allocation lock and deadlock trying to acquire a pointer lock.
    g_debug->pointer->PrepareFork();
  }
  g_dispatch->malloc_disable();
}
1165 
// Re-enables the allocator after debug_malloc_disable(), releasing locks in
// the reverse of the order they were acquired.
void debug_malloc_enable() {
  ScopedConcurrentLock lock;
  g_dispatch->malloc_enable();
  if (g_debug->pointer) {
    g_debug->pointer->PostForkParent();
  }
}
1173 
debug_malloc_backtrace(void * pointer,uintptr_t * frames,size_t max_frames)1174 ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t max_frames) {
1175   if (DebugCallsDisabled() || pointer == nullptr) {
1176     return 0;
1177   }
1178   ScopedConcurrentLock lock;
1179   ScopedDisableDebugCalls disable;
1180   ScopedBacktraceSignalBlocker blocked;
1181 
1182   if (!(g_debug->config().options() & BACKTRACE)) {
1183     return 0;
1184   }
1185   pointer = UntagPointer(pointer);
1186   return PointerData::GetFrames(pointer, frames, max_frames);
1187 }
1188 
1189 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
debug_pvalloc(size_t bytes)1190 void* debug_pvalloc(size_t bytes) {
1191   Unreachable::CheckIfRequested(g_debug->config());
1192 
1193   if (DebugCallsDisabled()) {
1194     return g_dispatch->pvalloc(bytes);
1195   }
1196 
1197   size_t pagesize = getpagesize();
1198   size_t size = __BIONIC_ALIGN(bytes, pagesize);
1199   if (size < bytes) {
1200     // Overflow
1201     errno = ENOMEM;
1202     return nullptr;
1203   }
1204   return debug_memalign(pagesize, size);
1205 }
1206 
debug_valloc(size_t size)1207 void* debug_valloc(size_t size) {
1208   Unreachable::CheckIfRequested(g_debug->config());
1209 
1210   if (DebugCallsDisabled()) {
1211     return g_dispatch->valloc(size);
1212   }
1213   return debug_memalign(getpagesize(), size);
1214 }
1215 #endif
1216 
// Serializes heap-dump writers (debug_write_malloc_leak_info and
// debug_dump_heap) so concurrent dump requests cannot interleave output.
static std::mutex g_dump_lock;
1218 
write_dump(int fd)1219 static void write_dump(int fd) {
1220   dprintf(fd, "Android Native Heap Dump v1.2\n\n");
1221 
1222   std::string fingerprint = android::base::GetProperty("ro.build.fingerprint", "unknown");
1223   dprintf(fd, "Build fingerprint: '%s'\n\n", fingerprint.c_str());
1224 
1225   PointerData::DumpLiveToFile(fd);
1226 
1227   dprintf(fd, "MAPS\n");
1228   std::string content;
1229   if (!android::base::ReadFileToString("/proc/self/maps", &content)) {
1230     dprintf(fd, "Could not open /proc/self/maps\n");
1231   } else {
1232     dprintf(fd, "%s", content.c_str());
1233   }
1234   dprintf(fd, "END\n");
1235 
1236   // Purge the memory that was allocated and freed during this operation
1237   // since it can be large enough to expand the RSS significantly.
1238   g_dispatch->mallopt(M_PURGE_ALL, 0);
1239 }
1240 
debug_write_malloc_leak_info(FILE * fp)1241 bool debug_write_malloc_leak_info(FILE* fp) {
1242   // Make sure any pending output is written to the file.
1243   fflush(fp);
1244 
1245   ScopedConcurrentLock lock;
1246   ScopedDisableDebugCalls disable;
1247   ScopedBacktraceSignalBlocker blocked;
1248 
1249   std::lock_guard<std::mutex> guard(g_dump_lock);
1250 
1251   if (!(g_debug->config().options() & BACKTRACE)) {
1252     return false;
1253   }
1254 
1255   write_dump(fileno(fp));
1256 
1257   return true;
1258 }
1259 
debug_dump_heap(const char * file_name)1260 void debug_dump_heap(const char* file_name) {
1261   ScopedConcurrentLock lock;
1262   ScopedDisableDebugCalls disable;
1263   ScopedBacktraceSignalBlocker blocked;
1264 
1265   std::lock_guard<std::mutex> guard(g_dump_lock);
1266 
1267   int fd = open(file_name, O_RDWR | O_CREAT | O_NOFOLLOW | O_TRUNC | O_CLOEXEC, 0644);
1268   if (fd == -1) {
1269     error_log("Unable to create file: %s", file_name);
1270     return;
1271   }
1272 
1273   error_log("Dumping to file: %s\n", file_name);
1274   write_dump(fd);
1275   close(fd);
1276 }
1277