1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <errno.h>
30 #include <inttypes.h>
31 #include <malloc.h>
32 #include <pthread.h>
33 #include <signal.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/cdefs.h>
38 #include <sys/param.h>
39 #include <sys/syscall.h>
40 #include <unistd.h>
41
42 #include <mutex>
43 #include <vector>
44
45 #include <android-base/file.h>
46 #include <android-base/properties.h>
47 #include <android-base/stringprintf.h>
48 #include <bionic/malloc_tagged_pointers.h>
49 #include <platform/bionic/reserved_signals.h>
50 #include <private/MallocXmlElem.h>
51 #include <private/bionic_malloc_dispatch.h>
52 #include <unwindstack/Unwinder.h>
53
54 #include "Config.h"
55 #include "DebugData.h"
56 #include "LogAllocatorStats.h"
57 #include "Unreachable.h"
58 #include "UnwindBacktrace.h"
59 #include "backtrace.h"
60 #include "debug_disable.h"
61 #include "debug_log.h"
62 #include "malloc_debug.h"
63
64 // ------------------------------------------------------------------------
65 // Global Data
66 // ------------------------------------------------------------------------
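// g_debug holds all of the malloc debug state created in debug_initialize().
// g_zygote_child points at libc's flag for whether this process is a zygote
// child, and g_dispatch is the dispatch table of the underlying allocator
// that every debug_* entry point forwards to.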
67 DebugData* g_debug;
68
69 bool* g_zygote_child;
70
71 const MallocDispatch* g_dispatch;
72
73 static inline __always_inline uint64_t Nanotime() {
74 struct timespec t = {};
75 clock_gettime(CLOCK_MONOTONIC, &t);
76 return static_cast<uint64_t>(t.tv_sec) * 1000000000LL + t.tv_nsec;
77 }
78
79 namespace {
80 // A TimedResult contains the result from the malloc et al. functions and the
81 // start/end timestamps.
82 struct TimedResult {
83 uint64_t start_ns = 0;
84 uint64_t end_ns = 0;
85 union {
86 size_t s;
87 int i;
88 void* p;
89 } v;
90
91   uint64_t GetStartTimeNS() const { return start_ns; }
92   uint64_t GetEndTimeNS() const { return end_ns; }
93   void SetStartTimeNS(uint64_t t) { start_ns = t; }
94   void SetEndTimeNS(uint64_t t) { end_ns = t; }
95
96 template <typename T>
97 void setValue(T);
98 template <>
99   void setValue(size_t s) {
100 v.s = s;
101 }
102 template <>
103   void setValue(int i) {
104 v.i = i;
105 }
106 template <>
107   void setValue(void* p) {
108 v.p = p;
109 }
110
111 template <typename T>
112 T getValue() const;
113 template <>
114   size_t getValue<size_t>() const {
115 return v.s;
116 }
117 template <>
118   int getValue<int>() const {
119 return v.i;
120 }
121 template <>
122   void* getValue<void*>() const {
123 return v.p;
124 }
125 };
126
127 class ScopedTimer {
128 public:
129   ScopedTimer(TimedResult& res) : res_(res) { res_.start_ns = Nanotime(); }
130
131   ~ScopedTimer() { res_.end_ns = Nanotime(); }
132
133 private:
134 TimedResult& res_;
135 };
136
137 } // namespace
138
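// TimerCall/TimerCallVoid forward a call through the MallocDispatch table and
// capture start/end timestamps in a TimedResult. TimerCall only bothers with
// the timing when RECORD_ALLOCS is enabled; TimerCallVoid always times the call.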
139 template <typename MallocFn, typename... Args>
140 static TimedResult TimerCall(MallocFn fn, Args... args) {
141 TimedResult ret;
142 decltype((g_dispatch->*fn)(args...)) r;
143 if (g_debug->config().options() & RECORD_ALLOCS) {
144 ScopedTimer t(ret);
145 r = (g_dispatch->*fn)(args...);
146 } else {
147 r = (g_dispatch->*fn)(args...);
148 }
149 ret.setValue<decltype(r)>(r);
150 return ret;
151 }
152
153 template <typename MallocFn, typename... Args>
154 static TimedResult TimerCallVoid(MallocFn fn, Args... args) {
155 TimedResult ret;
156 {
157 ScopedTimer t(ret);
158 (g_dispatch->*fn)(args...);
159 }
160 return ret;
161 }
162
163 #define TCALL(FUNC, ...) TimerCall(&MallocDispatch::FUNC, __VA_ARGS__);
164 #define TCALLVOID(FUNC, ...) TimerCallVoid(&MallocDispatch::FUNC, __VA_ARGS__);
165
166 // ------------------------------------------------------------------------
167
168 // ------------------------------------------------------------------------
169 // Use C style prototypes for all exported functions. This makes it easy
170 // to do dlsym lookups during libc initialization when malloc debug
171 // is enabled.
172 // ------------------------------------------------------------------------
173 __BEGIN_DECLS
174
175 bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* malloc_zygote_child,
176 const char* options);
177 void debug_finalize();
178 void debug_dump_heap(const char* file_name);
179 void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
180 size_t* total_memory, size_t* backtrace_size);
181 bool debug_write_malloc_leak_info(FILE* fp);
182 ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
183 void debug_free_malloc_leak_info(uint8_t* info);
184 size_t debug_malloc_usable_size(void* pointer);
185 void* debug_malloc(size_t size);
186 void debug_free(void* pointer);
187 void* debug_aligned_alloc(size_t alignment, size_t size);
188 void* debug_memalign(size_t alignment, size_t bytes);
189 void* debug_realloc(void* pointer, size_t bytes);
190 void* debug_calloc(size_t nmemb, size_t bytes);
191 struct mallinfo debug_mallinfo();
192 int debug_mallopt(int param, int value);
193 int debug_malloc_info(int options, FILE* fp);
194 int debug_posix_memalign(void** memptr, size_t alignment, size_t size);
195 int debug_malloc_iterate(uintptr_t base, size_t size,
196 void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
197 void debug_malloc_disable();
198 void debug_malloc_enable();
199
200 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
201 void* debug_pvalloc(size_t bytes);
202 void* debug_valloc(size_t size);
203 #endif
204
205 __END_DECLS
206 // ------------------------------------------------------------------------
207
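// Every allocation entry point takes this lock for reading, so normal
// operations run concurrently. debug_finalize() takes it for writing via
// BlockAllOperations() and never releases it, which blocks all further
// debug allocation work once finalization has started.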
208 class ScopedConcurrentLock {
209 public:
210   ScopedConcurrentLock() {
211 pthread_rwlock_rdlock(&lock_);
212 }
213   ~ScopedConcurrentLock() {
214 pthread_rwlock_unlock(&lock_);
215 }
216
217   static void Init() {
218 pthread_rwlockattr_t attr;
219 // Set the attribute so that when a write lock is pending, read locks are no
220 // longer granted.
221 pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
222 pthread_rwlock_init(&lock_, &attr);
223 }
224
225   static void BlockAllOperations() {
226 pthread_rwlock_wrlock(&lock_);
227 }
228
229 private:
230 static pthread_rwlock_t lock_;
231 };
232 pthread_rwlock_t ScopedConcurrentLock::lock_;
233
234 // Use this because the sigprocmask* functions filter out the reserved bionic
235 // signals including the signal this code blocks.
236 static inline int __rt_sigprocmask(int how, const sigset64_t* new_set, sigset64_t* old_set,
237 size_t sigset_size) {
238 return syscall(SYS_rt_sigprocmask, how, new_set, old_set, sigset_size);
239 }
240
241 // Need to block the backtrace signal while in malloc debug routines
242 // otherwise there is a chance of a deadlock and timeout when unwinding.
243 // This can occur if a thread is paused while owning a malloc debug
244 // internal lock.
245 class ScopedBacktraceSignalBlocker {
246 public:
247   ScopedBacktraceSignalBlocker() {
248 sigemptyset64(&backtrace_set_);
249 sigaddset64(&backtrace_set_, BIONIC_SIGNAL_BACKTRACE);
250 sigset64_t old_set;
251 __rt_sigprocmask(SIG_BLOCK, &backtrace_set_, &old_set, sizeof(backtrace_set_));
252 if (sigismember64(&old_set, BIONIC_SIGNAL_BACKTRACE)) {
253 unblock_ = false;
254 }
255 }
256
257   ~ScopedBacktraceSignalBlocker() {
258 if (unblock_) {
259 __rt_sigprocmask(SIG_UNBLOCK, &backtrace_set_, nullptr, sizeof(backtrace_set_));
260 }
261 }
262
263 private:
264 bool unblock_ = true;
265 sigset64_t backtrace_set_;
266 };
267
268 static void InitAtfork() {
269 static pthread_once_t atfork_init = PTHREAD_ONCE_INIT;
270 pthread_once(&atfork_init, []() {
271 pthread_atfork(
272 []() {
273 if (g_debug != nullptr) {
274 g_debug->PrepareFork();
275 }
276 },
277 []() {
278 if (g_debug != nullptr) {
279 g_debug->PostForkParent();
280 }
281 },
282 []() {
283 if (g_debug != nullptr) {
284 g_debug->PostForkChild();
285 }
286 });
287 });
288 }
289
290 void BacktraceAndLog() {
291 if (g_debug->config().options() & BACKTRACE_FULL) {
292 std::vector<uintptr_t> frames;
293 std::vector<unwindstack::FrameData> frames_info;
294 if (!Unwind(&frames, &frames_info, 256)) {
295 error_log(" Backtrace failed to get any frames.");
296 } else {
297 UnwindLog(frames_info);
298 }
299 } else {
300 std::vector<uintptr_t> frames(256);
301 size_t num_frames = backtrace_get(frames.data(), frames.size());
302 if (num_frames == 0) {
303 error_log(" Backtrace failed to get any frames.");
304 } else {
305 backtrace_log(frames.data(), num_frames);
306 }
307 }
308 }
309
310 static void LogError(const void* pointer, const char* error_str) {
311 error_log(LOG_DIVIDER);
312 error_log("+++ ALLOCATION %p %s", pointer, error_str);
313
314 // If we are tracking already freed pointers, check to see if this is
315 // one so we can print extra information.
316 if (g_debug->config().options() & FREE_TRACK) {
317 PointerData::LogFreeBacktrace(pointer);
318 }
319
320 error_log("Backtrace at time of failure:");
321 BacktraceAndLog();
322 error_log(LOG_DIVIDER);
323 if (g_debug->config().options() & ABORT_ON_ERROR) {
324 abort();
325 }
326 }
327
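// Validate a pointer passed to free/realloc/malloc_usable_size. With headers
// enabled, a bad tag indicates use-after-free or corruption; with pointer
// tracking enabled, the pointer must be known. On failure the error is logged
// (and the process aborts if ABORT_ON_ERROR is set) and false is returned.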
328 static bool VerifyPointer(const void* pointer, const char* function_name) {
329 if (g_debug->HeaderEnabled()) {
330 Header* header = g_debug->GetHeader(pointer);
331 if (header->tag != DEBUG_TAG) {
332 std::string error_str;
333 if (header->tag == DEBUG_FREE_TAG) {
334 error_str = std::string("USED AFTER FREE (") + function_name + ")";
335 } else {
336 error_str = android::base::StringPrintf("HAS INVALID TAG %" PRIx32 " (%s)", header->tag,
337 function_name);
338 }
339 LogError(pointer, error_str.c_str());
340 return false;
341 }
342 }
343
344 if (g_debug->TrackPointers()) {
345 if (!PointerData::Exists(pointer)) {
346 std::string error_str(std::string("UNKNOWN POINTER (") + function_name + ")");
347 LogError(pointer, error_str.c_str());
348 return false;
349 }
350 }
351 return true;
352 }
353
354 static size_t InternalMallocUsableSize(void* pointer) {
355 if (g_debug->HeaderEnabled()) {
356 return g_debug->GetHeader(pointer)->usable_size;
357 } else {
358 return g_dispatch->malloc_usable_size(pointer);
359 }
360 }
361
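// Fill in the Header that precedes the user pointer, write the front/rear
// guard patterns if those options are enabled, and return the pointer to hand
// back to the caller (nullptr if the underlying allocator reports a zero
// usable size).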
362 static void* InitHeader(Header* header, void* orig_pointer, size_t size) {
363 header->tag = DEBUG_TAG;
364 header->orig_pointer = orig_pointer;
365 header->size = size;
366 header->usable_size = g_dispatch->malloc_usable_size(orig_pointer);
367 if (header->usable_size == 0) {
368 g_dispatch->free(orig_pointer);
369 return nullptr;
370 }
371 header->usable_size -= g_debug->pointer_offset() + reinterpret_cast<uintptr_t>(header) -
372 reinterpret_cast<uintptr_t>(orig_pointer);
373
374 if (g_debug->config().options() & FRONT_GUARD) {
375 uint8_t* guard = g_debug->GetFrontGuard(header);
376 memset(guard, g_debug->config().front_guard_value(), g_debug->config().front_guard_bytes());
377 }
378
379 if (g_debug->config().options() & REAR_GUARD) {
380 uint8_t* guard = g_debug->GetRearGuard(header);
381 memset(guard, g_debug->config().rear_guard_value(), g_debug->config().rear_guard_bytes());
382 // If the rear guard is enabled, set the usable size to the exact size
383 // of the allocation.
384 header->usable_size = header->size;
385 }
386
387 return g_debug->GetPointer(header);
388 }
389
390 extern "C" void __asan_init() __attribute__((weak));
391
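// Called by libc to turn on malloc debug; `options` is the option string
// (on Android this usually comes from the libc.debug.malloc.options system
// property). If this returns false, malloc debug is not enabled.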
392 bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* zygote_child,
393 const char* options) {
394 if (zygote_child == nullptr || options == nullptr) {
395 return false;
396 }
397
398 if (__asan_init != 0) {
399 error_log("malloc debug cannot be enabled alongside ASAN");
400 return false;
401 }
402
403 InitAtfork();
404
405 g_zygote_child = zygote_child;
406
407 g_dispatch = malloc_dispatch;
408
409 if (!DebugDisableInitialize()) {
410 return false;
411 }
412
413 DebugData* debug = new DebugData();
414 if (!debug->Initialize(options) || !Unreachable::Initialize(debug->config())) {
415 delete debug;
416 DebugDisableFinalize();
417 return false;
418 }
419 g_debug = debug;
420
421 // Always enable the backtrace code since we will use it in a number
422 // of different error cases.
423 backtrace_startup();
424
425 if (g_debug->config().options() & VERBOSE) {
426 info_log("%s: malloc debug enabled", getprogname());
427 }
428
429 ScopedConcurrentLock::Init();
430
431 return true;
432 }
433
434 void debug_finalize() {
435 if (g_debug == nullptr) {
436 return;
437 }
438
439 // Make sure that there are no other threads doing debug allocations
440 // before we kill everything.
441 ScopedConcurrentLock::BlockAllOperations();
442
443 // Turn off capturing allocation calls.
444 DebugDisableSet(true);
445
446 if (g_debug->config().options() & FREE_TRACK) {
447 PointerData::VerifyAllFreed();
448 }
449
450 if (g_debug->config().options() & LEAK_TRACK) {
451 PointerData::LogLeaks();
452 }
453
454 if ((g_debug->config().options() & BACKTRACE) && g_debug->config().backtrace_dump_on_exit()) {
455 debug_dump_heap(android::base::StringPrintf("%s.%d.exit.txt",
456 g_debug->config().backtrace_dump_prefix().c_str(),
457 getpid()).c_str());
458 }
459
460 backtrace_shutdown();
461
462 // In order to prevent any issues of threads freeing previous pointers
463 // after the main thread calls this code, simply leak the g_debug pointer
464 // and do not destroy the debug disable pthread key.
465 }
466
467 void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
468 size_t* total_memory, size_t* backtrace_size) {
469 ScopedConcurrentLock lock;
470 ScopedDisableDebugCalls disable;
471 ScopedBacktraceSignalBlocker blocked;
472
473 // Verify the arguments.
474 if (info == nullptr || overall_size == nullptr || info_size == nullptr || total_memory == nullptr ||
475 backtrace_size == nullptr) {
476 error_log("get_malloc_leak_info: At least one invalid parameter.");
477 return;
478 }
479
480 *info = nullptr;
481 *overall_size = 0;
482 *info_size = 0;
483 *total_memory = 0;
484 *backtrace_size = 0;
485
486 if (!(g_debug->config().options() & BACKTRACE)) {
487 error_log(
488 "get_malloc_leak_info: Allocations not being tracked, to enable "
489 "set the option 'backtrace'.");
490 return;
491 }
492
493 PointerData::GetInfo(info, overall_size, info_size, total_memory, backtrace_size);
494 }
495
496 void debug_free_malloc_leak_info(uint8_t* info) {
497 g_dispatch->free(info);
498 // Purge the memory that was freed since a significant amount of
499 // memory could have been allocated and freed.
500 g_dispatch->mallopt(M_PURGE_ALL, 0);
501 }
502
503 size_t debug_malloc_usable_size(void* pointer) {
504 Unreachable::CheckIfRequested(g_debug->config());
505
506 if (DebugCallsDisabled() || pointer == nullptr) {
507 return g_dispatch->malloc_usable_size(pointer);
508 }
509 ScopedConcurrentLock lock;
510 ScopedDisableDebugCalls disable;
511 ScopedBacktraceSignalBlocker blocked;
512
513 if (!VerifyPointer(pointer, "malloc_usable_size")) {
514 return 0;
515 }
516
517 return InternalMallocUsableSize(pointer);
518 }
519
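// Common allocation path shared by debug_malloc() and debug_realloc(): dump
// the heap or log allocator stats if requested, pad the request by
// extra_bytes() for the header and guards, allocate through the dispatch
// table, record the pointer when tracking is enabled, and fill the new
// memory when fill_on_alloc is configured.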
520 static TimedResult InternalMalloc(size_t size) {
521 uint64_t options = g_debug->config().options();
522 if ((options & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
523 debug_dump_heap(android::base::StringPrintf(
524 "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
525 .c_str());
526 }
527 if (options & LOG_ALLOCATOR_STATS_ON_SIGNAL) {
528 LogAllocatorStats::CheckIfShouldLog();
529 }
530
531 if (size == 0) {
532 size = 1;
533 }
534
535 TimedResult result;
536
537 size_t real_size = size + g_debug->extra_bytes();
538 if (real_size < size) {
539 // Overflow.
540 errno = ENOMEM;
541 result.setValue<void*>(nullptr);
542 return result;
543 }
544
545 if (size > PointerInfoType::MaxSize()) {
546 errno = ENOMEM;
547 result.setValue<void*>(nullptr);
548 return result;
549 }
550
551 if (g_debug->HeaderEnabled()) {
552 result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
553 Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
554 if (header == nullptr) {
555 return result;
556 }
557 result.setValue<void*>(InitHeader(header, header, size));
558 } else {
559 result = TCALL(malloc, real_size);
560 }
561
562 void* pointer = result.getValue<void*>();
563
564 if (pointer != nullptr) {
565 if (g_debug->TrackPointers()) {
566 PointerData::Add(pointer, size);
567 }
568
569 if (g_debug->config().options() & FILL_ON_ALLOC) {
570 size_t bytes = InternalMallocUsableSize(pointer);
571 size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
572 bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
573 memset(pointer, g_debug->config().fill_alloc_value(), bytes);
574 }
575 }
576
577 return result;
578 }
579
580 void* debug_malloc(size_t size) {
581 Unreachable::CheckIfRequested(g_debug->config());
582
583 if (DebugCallsDisabled()) {
584 return g_dispatch->malloc(size);
585 }
586 ScopedConcurrentLock lock;
587 ScopedDisableDebugCalls disable;
588 ScopedBacktraceSignalBlocker blocked;
589
590 TimedResult result = InternalMalloc(size);
591
592 if (g_debug->config().options() & RECORD_ALLOCS) {
593 g_debug->record->AddEntry(new MallocEntry(result.getValue<void*>(), size,
594 result.GetStartTimeNS(), result.GetEndTimeNS()));
595 }
596
597 return result.getValue<void*>();
598 }
599
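// Common free path shared by debug_free() and debug_realloc(): verify the
// guards when headers are enabled, optionally fill the freed memory, drop the
// pointer from tracking, and with free_track defer the real free by pushing
// the pointer onto the freed list (AddFreed may return an older entry that is
// actually released now).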
600 static TimedResult InternalFree(void* pointer) {
601 uint64_t options = g_debug->config().options();
602 if ((options & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
603 debug_dump_heap(android::base::StringPrintf(
604 "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
605 .c_str());
606 }
607 if (options & LOG_ALLOCATOR_STATS_ON_SIGNAL) {
608 LogAllocatorStats::CheckIfShouldLog();
609 }
610
611 void* free_pointer = pointer;
612 size_t bytes;
613 Header* header;
614 if (g_debug->HeaderEnabled()) {
615 header = g_debug->GetHeader(pointer);
616 free_pointer = header->orig_pointer;
617
618 if (g_debug->config().options() & FRONT_GUARD) {
619 if (!g_debug->front_guard->Valid(header)) {
620 g_debug->front_guard->LogFailure(header);
621 }
622 }
623 if (g_debug->config().options() & REAR_GUARD) {
624 if (!g_debug->rear_guard->Valid(header)) {
625 g_debug->rear_guard->LogFailure(header);
626 }
627 }
628
629 header->tag = DEBUG_FREE_TAG;
630
631 bytes = header->usable_size;
632 } else {
633 bytes = g_dispatch->malloc_usable_size(pointer);
634 }
635
636 if (g_debug->config().options() & FILL_ON_FREE) {
637 size_t fill_bytes = g_debug->config().fill_on_free_bytes();
638 fill_bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
639 memset(pointer, g_debug->config().fill_free_value(), fill_bytes);
640 }
641
642 if (g_debug->TrackPointers()) {
643 PointerData::Remove(pointer);
644 }
645
646 TimedResult result;
647 if (g_debug->config().options() & FREE_TRACK) {
648 // Do not add the allocation until we are done modifying the pointer
649 // itself. This avoids a race if a lot of threads are all doing
650 // frees at the same time and we wind up trying to really free this
651 // pointer from another thread, while still trying to free it in
652 // this function.
653 pointer = PointerData::AddFreed(pointer, bytes);
654 if (pointer != nullptr && g_debug->HeaderEnabled()) {
655 pointer = g_debug->GetHeader(pointer)->orig_pointer;
656 }
657 result = TCALLVOID(free, pointer);
658 } else {
659 result = TCALLVOID(free, free_pointer);
660 }
661
662 return result;
663 }
664
665 void debug_free(void* pointer) {
666 Unreachable::CheckIfRequested(g_debug->config());
667
668 if (DebugCallsDisabled() || pointer == nullptr) {
669 return g_dispatch->free(pointer);
670 }
671 ScopedConcurrentLock lock;
672 ScopedDisableDebugCalls disable;
673 ScopedBacktraceSignalBlocker blocked;
674
675 if (!VerifyPointer(pointer, "free")) {
676 return;
677 }
678
679 TimedResult result = InternalFree(pointer);
680
681 if (g_debug->config().options() & RECORD_ALLOCS) {
682 g_debug->record->AddEntry(
683 new FreeEntry(pointer, result.GetStartTimeNS(), result.GetEndTimeNS()));
684 }
685 }
686
687 void* debug_memalign(size_t alignment, size_t bytes) {
688 Unreachable::CheckIfRequested(g_debug->config());
689
690 if (DebugCallsDisabled()) {
691 return g_dispatch->memalign(alignment, bytes);
692 }
693 ScopedConcurrentLock lock;
694 ScopedDisableDebugCalls disable;
695 ScopedBacktraceSignalBlocker blocked;
696
697 if (bytes == 0) {
698 bytes = 1;
699 }
700
701 if (bytes > PointerInfoType::MaxSize()) {
702 errno = ENOMEM;
703 return nullptr;
704 }
705
706 TimedResult result;
707 void* pointer;
708 if (g_debug->HeaderEnabled()) {
709 // Make the alignment a power of two.
710 if (!powerof2(alignment)) {
711 alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
712 }
713 // Force the alignment to at least MINIMUM_ALIGNMENT_BYTES to guarantee
714 // that the header is aligned properly.
715 if (alignment < MINIMUM_ALIGNMENT_BYTES) {
716 alignment = MINIMUM_ALIGNMENT_BYTES;
717 }
718
719 // We don't have any idea what the natural alignment of
720 // the underlying native allocator is, so we always need to
721 // over allocate.
722 size_t real_size = alignment + bytes + g_debug->extra_bytes();
723 if (real_size < bytes) {
724 // Overflow.
725 errno = ENOMEM;
726 return nullptr;
727 }
728
729 result = TCALL(malloc, real_size);
730 pointer = result.getValue<void*>();
731 if (pointer == nullptr) {
732 return nullptr;
733 }
734
735 uintptr_t value = reinterpret_cast<uintptr_t>(pointer) + g_debug->pointer_offset();
736 // Now align the pointer.
737 value += (-value % alignment);
738
739 Header* header = g_debug->GetHeader(reinterpret_cast<void*>(value));
740 // Don't need to update `result` here because we only need the timestamps.
741 pointer = InitHeader(header, pointer, bytes);
742 } else {
743 size_t real_size = bytes + g_debug->extra_bytes();
744 if (real_size < bytes) {
745 // Overflow.
746 errno = ENOMEM;
747 return nullptr;
748 }
749 result = TCALL(memalign, alignment, real_size);
750 pointer = result.getValue<void*>();
751 }
752
753 if (pointer != nullptr) {
754 if (g_debug->TrackPointers()) {
755 PointerData::Add(pointer, bytes);
756 }
757
758 if (g_debug->config().options() & FILL_ON_ALLOC) {
759 size_t bytes = InternalMallocUsableSize(pointer);
760 size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
761 bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
762 memset(pointer, g_debug->config().fill_alloc_value(), bytes);
763 }
764
765 if (g_debug->config().options() & RECORD_ALLOCS) {
766 g_debug->record->AddEntry(new MemalignEntry(pointer, bytes, alignment,
767 result.GetStartTimeNS(), result.GetEndTimeNS()));
768 }
769 }
770
771 return pointer;
772 }
773
774 void* debug_realloc(void* pointer, size_t bytes) {
775 Unreachable::CheckIfRequested(g_debug->config());
776
777 if (DebugCallsDisabled()) {
778 return g_dispatch->realloc(pointer, bytes);
779 }
780 ScopedConcurrentLock lock;
781 ScopedDisableDebugCalls disable;
782 ScopedBacktraceSignalBlocker blocked;
783
784 if (pointer == nullptr) {
785 TimedResult result = InternalMalloc(bytes);
786 if (g_debug->config().options() & RECORD_ALLOCS) {
787 g_debug->record->AddEntry(new ReallocEntry(result.getValue<void*>(), bytes, nullptr,
788 result.GetStartTimeNS(), result.GetEndTimeNS()));
789 }
790 pointer = result.getValue<void*>();
791 return pointer;
792 }
793
794 if (!VerifyPointer(pointer, "realloc")) {
795 return nullptr;
796 }
797
798 if (bytes == 0) {
799 TimedResult result = InternalFree(pointer);
800
801 if (g_debug->config().options() & RECORD_ALLOCS) {
802 g_debug->record->AddEntry(new ReallocEntry(nullptr, bytes, pointer, result.GetStartTimeNS(),
803 result.GetEndTimeNS()));
804 }
805
806 return nullptr;
807 }
808
809 size_t real_size = bytes;
810 if (g_debug->config().options() & EXPAND_ALLOC) {
811 real_size += g_debug->config().expand_alloc_bytes();
812 if (real_size < bytes) {
813 // Overflow.
814 errno = ENOMEM;
815 return nullptr;
816 }
817 }
818
819 if (bytes > PointerInfoType::MaxSize()) {
820 errno = ENOMEM;
821 return nullptr;
822 }
823
824 TimedResult result;
825 void* new_pointer;
826 size_t prev_size;
827 if (g_debug->HeaderEnabled()) {
828 // Same size, do nothing.
829 Header* header = g_debug->GetHeader(pointer);
830 if (real_size == header->size) {
831 if (g_debug->TrackPointers()) {
832 // Remove and re-add so that the backtrace is updated.
833 PointerData::Remove(pointer);
834 PointerData::Add(pointer, real_size);
835 }
836 return pointer;
837 }
838
839 // Allocation is shrinking.
840 if (real_size < header->usable_size) {
841 header->size = real_size;
842 if (g_debug->config().options() & REAR_GUARD) {
843 // Don't bother allocating a smaller pointer in this case, simply
844 // change the header usable_size and reset the rear guard.
845 header->usable_size = header->size;
846 memset(g_debug->GetRearGuard(header), g_debug->config().rear_guard_value(),
847 g_debug->config().rear_guard_bytes());
848 }
849 if (g_debug->TrackPointers()) {
850 // Remove and re-add so that the backtrace is updated.
851 PointerData::Remove(pointer);
852 PointerData::Add(pointer, real_size);
853 }
854 return pointer;
855 }
856
857 // Allocate the new size.
858 result = InternalMalloc(bytes);
859 new_pointer = result.getValue<void*>();
860 if (new_pointer == nullptr) {
861 errno = ENOMEM;
862 return nullptr;
863 }
864
865 prev_size = header->usable_size;
866 memcpy(new_pointer, pointer, prev_size);
867 TimedResult free_time = InternalFree(pointer);
868 // `realloc` is split into two steps, update the end time to the finish time
869 // of the second operation.
870 result.SetEndTimeNS(free_time.GetEndTimeNS());
871 } else {
872 if (g_debug->TrackPointers()) {
873 PointerData::Remove(pointer);
874 }
875
876 prev_size = g_dispatch->malloc_usable_size(pointer);
877 result = TCALL(realloc, pointer, real_size);
878 new_pointer = result.getValue<void*>();
879 if (new_pointer == nullptr) {
880 return nullptr;
881 }
882
883 if (g_debug->TrackPointers()) {
884 PointerData::Add(new_pointer, real_size);
885 }
886 }
887
888 if (g_debug->config().options() & FILL_ON_ALLOC) {
889 size_t bytes = InternalMallocUsableSize(new_pointer);
890 if (bytes > g_debug->config().fill_on_alloc_bytes()) {
891 bytes = g_debug->config().fill_on_alloc_bytes();
892 }
893 if (bytes > prev_size) {
894 memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(new_pointer) + prev_size),
895 g_debug->config().fill_alloc_value(), bytes - prev_size);
896 }
897 }
898
899 if (g_debug->config().options() & RECORD_ALLOCS) {
900 g_debug->record->AddEntry(new ReallocEntry(new_pointer, bytes, pointer, result.GetStartTimeNS(),
901 result.GetEndTimeNS()));
902 }
903
904 return new_pointer;
905 }
906
907 void* debug_calloc(size_t nmemb, size_t bytes) {
908 Unreachable::CheckIfRequested(g_debug->config());
909
910 if (DebugCallsDisabled()) {
911 return g_dispatch->calloc(nmemb, bytes);
912 }
913 ScopedConcurrentLock lock;
914 ScopedDisableDebugCalls disable;
915 ScopedBacktraceSignalBlocker blocked;
916
917 size_t size;
918 if (__builtin_mul_overflow(nmemb, bytes, &size)) {
919 // Overflow
920 errno = ENOMEM;
921 return nullptr;
922 }
923
924 if (size == 0) {
925 size = 1;
926 }
927
928 size_t real_size;
929 if (__builtin_add_overflow(size, g_debug->extra_bytes(), &real_size)) {
930 // Overflow.
931 errno = ENOMEM;
932 return nullptr;
933 }
934
935 if (real_size > PointerInfoType::MaxSize()) {
936 errno = ENOMEM;
937 return nullptr;
938 }
939
940 void* pointer;
941 TimedResult result;
942 if (g_debug->HeaderEnabled()) {
943 // Need to guarantee the alignment of the header.
944 result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
945 Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
946 if (header == nullptr) {
947 return nullptr;
948 }
949 memset(header, 0, g_dispatch->malloc_usable_size(header));
950 pointer = InitHeader(header, header, size);
951 } else {
952 result = TCALL(calloc, 1, real_size);
953 pointer = result.getValue<void*>();
954 }
955
956 if (g_debug->config().options() & RECORD_ALLOCS) {
957 g_debug->record->AddEntry(
958 new CallocEntry(pointer, nmemb, bytes, result.GetStartTimeNS(), result.GetEndTimeNS()));
959 }
960
961 if (pointer != nullptr && g_debug->TrackPointers()) {
962 PointerData::Add(pointer, size);
963 }
964 return pointer;
965 }
966
967 struct mallinfo debug_mallinfo() {
968 return g_dispatch->mallinfo();
969 }
970
971 int debug_mallopt(int param, int value) {
972 return g_dispatch->mallopt(param, value);
973 }
974
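// When pointer tracking is enabled, emit a minimal malloc_info()-style XML
// document that groups consecutive allocations of the same size; otherwise
// defer to the underlying allocator's malloc_info().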
975 int debug_malloc_info(int options, FILE* fp) {
976 if (DebugCallsDisabled() || !g_debug->TrackPointers()) {
977 return g_dispatch->malloc_info(options, fp);
978 }
979
980 // Make sure any pending output is written to the file.
981 fflush(fp);
982
983 ScopedConcurrentLock lock;
984 ScopedDisableDebugCalls disable;
985 ScopedBacktraceSignalBlocker blocked;
986
987 // Avoid any issues where allocations are made that will be freed
988 // in the fclose.
989 int fd = fileno(fp);
990 MallocXmlElem root(fd, "malloc", "version=\"debug-malloc-1\"");
991 std::vector<ListInfoType> list;
992 PointerData::GetAllocList(&list);
993
994 size_t alloc_num = 0;
995 for (size_t i = 0; i < list.size(); i++) {
996 MallocXmlElem alloc(fd, "allocation", "nr=\"%zu\"", alloc_num);
997
998 size_t total = 1;
999 size_t size = list[i].size;
1000 while (i < list.size() - 1 && list[i + 1].size == size) {
1001 i++;
1002 total++;
1003 }
1004 MallocXmlElem(fd, "size").Contents("%zu", list[i].size);
1005 MallocXmlElem(fd, "total").Contents("%zu", total);
1006 alloc_num++;
1007 }
1008 return 0;
1009 }
1010
1011 void* debug_aligned_alloc(size_t alignment, size_t size) {
1012 Unreachable::CheckIfRequested(g_debug->config());
1013
1014 if (DebugCallsDisabled()) {
1015 return g_dispatch->aligned_alloc(alignment, size);
1016 }
1017 if (!powerof2(alignment) || (size % alignment) != 0) {
1018 errno = EINVAL;
1019 return nullptr;
1020 }
1021 return debug_memalign(alignment, size);
1022 }
1023
1024 int debug_posix_memalign(void** memptr, size_t alignment, size_t size) {
1025 Unreachable::CheckIfRequested(g_debug->config());
1026
1027 if (DebugCallsDisabled()) {
1028 return g_dispatch->posix_memalign(memptr, alignment, size);
1029 }
1030
1031 if (alignment < sizeof(void*) || !powerof2(alignment)) {
1032 return EINVAL;
1033 }
1034 int saved_errno = errno;
1035 *memptr = debug_memalign(alignment, size);
1036 errno = saved_errno;
1037 return (*memptr != nullptr) ? 0 : ENOMEM;
1038 }
1039
1040 int debug_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*),
1041 void* arg) {
1042 ScopedConcurrentLock lock;
1043 if (g_debug->TrackPointers()) {
1044 PointerData::IteratePointers([&callback, &arg](uintptr_t pointer) {
1045 callback(pointer, InternalMallocUsableSize(reinterpret_cast<void*>(pointer)), arg);
1046 });
1047 return 0;
1048 }
1049
1050 // An option that adds a header will add pointer tracking, so no need to
1051 // check if headers are enabled.
1052 return g_dispatch->malloc_iterate(base, size, callback, arg);
1053 }
1054
1055 void debug_malloc_disable() {
1056 ScopedConcurrentLock lock;
1057 g_dispatch->malloc_disable();
1058 if (g_debug->pointer) {
1059 g_debug->pointer->PrepareFork();
1060 }
1061 }
1062
1063 void debug_malloc_enable() {
1064 ScopedConcurrentLock lock;
1065 if (g_debug->pointer) {
1066 g_debug->pointer->PostForkParent();
1067 }
1068 g_dispatch->malloc_enable();
1069 }
1070
1071 ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t max_frames) {
1072 if (DebugCallsDisabled() || pointer == nullptr) {
1073 return 0;
1074 }
1075 ScopedConcurrentLock lock;
1076 ScopedDisableDebugCalls disable;
1077 ScopedBacktraceSignalBlocker blocked;
1078
1079 if (!(g_debug->config().options() & BACKTRACE)) {
1080 return 0;
1081 }
1082 pointer = UntagPointer(pointer);
1083 return PointerData::GetFrames(pointer, frames, max_frames);
1084 }
1085
1086 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
1087 void* debug_pvalloc(size_t bytes) {
1088 Unreachable::CheckIfRequested(g_debug->config());
1089
1090 if (DebugCallsDisabled()) {
1091 return g_dispatch->pvalloc(bytes);
1092 }
1093
1094 size_t pagesize = getpagesize();
1095 size_t size = __BIONIC_ALIGN(bytes, pagesize);
1096 if (size < bytes) {
1097 // Overflow
1098 errno = ENOMEM;
1099 return nullptr;
1100 }
1101 return debug_memalign(pagesize, size);
1102 }
1103
1104 void* debug_valloc(size_t size) {
1105 Unreachable::CheckIfRequested(g_debug->config());
1106
1107 if (DebugCallsDisabled()) {
1108 return g_dispatch->valloc(size);
1109 }
1110 return debug_memalign(getpagesize(), size);
1111 }
1112 #endif
1113
1114 static std::mutex g_dump_lock;
1115
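// Write the native heap dump: a version header, the build fingerprint, the
// live allocations recorded by PointerData, and a copy of /proc/self/maps.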
1116 static void write_dump(int fd) {
1117 dprintf(fd, "Android Native Heap Dump v1.2\n\n");
1118
1119 std::string fingerprint = android::base::GetProperty("ro.build.fingerprint", "unknown");
1120 dprintf(fd, "Build fingerprint: '%s'\n\n", fingerprint.c_str());
1121
1122 PointerData::DumpLiveToFile(fd);
1123
1124 dprintf(fd, "MAPS\n");
1125 std::string content;
1126 if (!android::base::ReadFileToString("/proc/self/maps", &content)) {
1127 dprintf(fd, "Could not open /proc/self/maps\n");
1128 } else {
1129 dprintf(fd, "%s", content.c_str());
1130 }
1131 dprintf(fd, "END\n");
1132
1133 // Purge the memory that was allocated and freed during this operation
1134 // since it can be large enough to expand the RSS significantly.
1135 g_dispatch->mallopt(M_PURGE_ALL, 0);
1136 }
1137
1138 bool debug_write_malloc_leak_info(FILE* fp) {
1139 // Make sure any pending output is written to the file.
1140 fflush(fp);
1141
1142 ScopedConcurrentLock lock;
1143 ScopedDisableDebugCalls disable;
1144 ScopedBacktraceSignalBlocker blocked;
1145
1146 std::lock_guard<std::mutex> guard(g_dump_lock);
1147
1148 if (!(g_debug->config().options() & BACKTRACE)) {
1149 return false;
1150 }
1151
1152 write_dump(fileno(fp));
1153
1154 return true;
1155 }
1156
1157 void debug_dump_heap(const char* file_name) {
1158 ScopedConcurrentLock lock;
1159 ScopedDisableDebugCalls disable;
1160 ScopedBacktraceSignalBlocker blocked;
1161
1162 std::lock_guard<std::mutex> guard(g_dump_lock);
1163
1164 int fd = open(file_name, O_RDWR | O_CREAT | O_NOFOLLOW | O_TRUNC | O_CLOEXEC, 0644);
1165 if (fd == -1) {
1166 error_log("Unable to create file: %s", file_name);
1167 return;
1168 }
1169
1170 error_log("Dumping to file: %s\n", file_name);
1171 write_dump(fd);
1172 close(fd);
1173 }
1174