/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>  // pthread_once, pthread_atfork
#include <string.h>
#include <sys/cdefs.h>
#include <sys/param.h>
#include <unistd.h>

#include <vector>

#include <private/bionic_malloc_dispatch.h>

#include "backtrace.h"
#include "Config.h"
#include "DebugData.h"
#include "debug_disable.h"
#include "debug_log.h"
#include "malloc_debug.h"

// ------------------------------------------------------------------------
// Global Data
// ------------------------------------------------------------------------
DebugData* g_debug;

int* g_malloc_zygote_child;

const MallocDispatch* g_dispatch;
// ------------------------------------------------------------------------

// ------------------------------------------------------------------------
// Use C style prototypes for all exported functions. This makes it easy
// to do dlsym lookups during libc initialization when malloc debug
// is enabled.
// ------------------------------------------------------------------------
__BEGIN_DECLS

bool debug_initialize(const MallocDispatch* malloc_dispatch, int* malloc_zygote_child,
    const char* options);
void debug_finalize();
void debug_get_malloc_leak_info(
    uint8_t** info, size_t* overall_size, size_t* info_size, size_t* total_memory,
    size_t* backtrace_size);
ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
void debug_free_malloc_leak_info(uint8_t* info);
size_t debug_malloc_usable_size(void* pointer);
void* debug_malloc(size_t size);
void debug_free(void* pointer);
void* debug_memalign(size_t alignment, size_t bytes);
void* debug_realloc(void* pointer, size_t bytes);
void* debug_calloc(size_t nmemb, size_t bytes);
struct mallinfo debug_mallinfo();
int debug_mallopt(int param, int value);
int debug_posix_memalign(void** memptr, size_t alignment, size_t size);
int debug_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
void debug_malloc_disable();
void debug_malloc_enable();

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* debug_pvalloc(size_t bytes);
void* debug_valloc(size_t size);
#endif

__END_DECLS
// ------------------------------------------------------------------------

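// Register fork handlers (exactly once) so the debug state can be prepared
// before fork() and restored in both the parent and the child afterwards.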
static void InitAtfork() {
  static pthread_once_t atfork_init = PTHREAD_ONCE_INIT;
  pthread_once(&atfork_init, [](){
    pthread_atfork(
        [](){
          if (g_debug != nullptr) {
            g_debug->PrepareFork();
          }
        },
        [](){
          if (g_debug != nullptr) {
            g_debug->PostForkParent();
          }
        },
        [](){
          if (g_debug != nullptr) {
            g_debug->PostForkChild();
          }
        }
    );
  });
}

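// Report an allocation whose debug header tag is wrong: either the pointer
// was used after being freed, or the header has been corrupted.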
static void LogTagError(const Header* header, const void* pointer, const char* name) {
  error_log(LOG_DIVIDER);
  if (header->tag == DEBUG_FREE_TAG) {
    error_log("+++ ALLOCATION %p USED AFTER FREE (%s)", pointer, name);
    if (g_debug->config().options() & FREE_TRACK) {
      g_debug->free_track->LogBacktrace(header);
    }
  } else {
    error_log("+++ ALLOCATION %p HAS INVALID TAG %" PRIx32 " (%s)", pointer, header->tag, name);
  }
  error_log("Backtrace at time of failure:");
  std::vector<uintptr_t> frames(64);
  size_t frame_num = backtrace_get(frames.data(), frames.size());
  frames.resize(frame_num);
  backtrace_log(frames.data(), frames.size());
  error_log(LOG_DIVIDER);
}

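// Initialize the debug header of a new allocation: set the tag, remember the
// original pointer and requested size, write the front/rear guards and record
// an allocation backtrace when those options are enabled. Returns the pointer
// to hand back to the caller, or nullptr on failure.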
static void* InitHeader(Header* header, void* orig_pointer, size_t size) {
  header->tag = DEBUG_TAG;
  header->orig_pointer = orig_pointer;
  header->size = size;
  if (*g_malloc_zygote_child) {
    header->set_zygote();
  }
  header->usable_size = g_dispatch->malloc_usable_size(orig_pointer);
  if (header->usable_size == 0) {
    g_dispatch->free(orig_pointer);
    return nullptr;
  }
  header->usable_size -= g_debug->pointer_offset() +
      reinterpret_cast<uintptr_t>(header) - reinterpret_cast<uintptr_t>(orig_pointer);

  if (g_debug->config().options() & FRONT_GUARD) {
    uint8_t* guard = g_debug->GetFrontGuard(header);
    memset(guard, g_debug->config().front_guard_value(), g_debug->config().front_guard_bytes());
  }

  if (g_debug->config().options() & REAR_GUARD) {
    uint8_t* guard = g_debug->GetRearGuard(header);
    memset(guard, g_debug->config().rear_guard_value(), g_debug->config().rear_guard_bytes());
    // If the rear guard is enabled, set the usable size to the exact size
    // of the allocation.
    header->usable_size = header->real_size();
  }

  bool backtrace_found = false;
  if (g_debug->config().options() & BACKTRACE) {
    BacktraceHeader* back_header = g_debug->GetAllocBacktrace(header);
    if (g_debug->backtrace->enabled()) {
      back_header->num_frames = backtrace_get(
          &back_header->frames[0], g_debug->config().backtrace_frames());
      backtrace_found = back_header->num_frames > 0;
    } else {
      back_header->num_frames = 0;
    }
  }

  if (g_debug->config().options() & TRACK_ALLOCS) {
    g_debug->track->Add(header, backtrace_found);
  }

  return g_debug->GetPointer(header);
}

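// Entry point called by libc when malloc debug is enabled: parse the option
// string, set up the global state and start the backtrace machinery.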
bool debug_initialize(const MallocDispatch* malloc_dispatch, int* malloc_zygote_child,
    const char* options) {
  if (malloc_zygote_child == nullptr || options == nullptr) {
    return false;
  }

  InitAtfork();

  g_malloc_zygote_child = malloc_zygote_child;

  g_dispatch = malloc_dispatch;

  if (!DebugDisableInitialize()) {
    return false;
  }

  DebugData* debug = new DebugData();
  if (!debug->Initialize(options)) {
    delete debug;
    DebugDisableFinalize();
    return false;
  }
  g_debug = debug;

  // Always enable the backtrace code since we will use it in a number
  // of different error cases.
  backtrace_startup();

  return true;
}

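// Tear down malloc debug: verify any tracked freed allocations, report leaks
// if leak tracking is enabled, then release the global state.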
void debug_finalize() {
  if (g_debug == nullptr) {
    return;
  }

  if (g_debug->config().options() & FREE_TRACK) {
    g_debug->free_track->VerifyAll();
  }

  if (g_debug->config().options() & LEAK_TRACK) {
    g_debug->track->DisplayLeaks();
  }

  DebugDisableSet(true);

  backtrace_shutdown();

  delete g_debug;
  g_debug = nullptr;

  DebugDisableFinalize();
}

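// Copy the tracked allocation data (including backtraces) into a buffer
// allocated for the caller; release it with debug_free_malloc_leak_info().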
void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size,
    size_t* info_size, size_t* total_memory, size_t* backtrace_size) {
  ScopedDisableDebugCalls disable;

  // Verify the arguments.
  if (info == nullptr || overall_size == nullptr || info_size == nullptr ||
      total_memory == nullptr || backtrace_size == nullptr) {
    error_log("get_malloc_leak_info: At least one invalid parameter.");
    return;
  }

  *info = nullptr;
  *overall_size = 0;
  *info_size = 0;
  *total_memory = 0;
  *backtrace_size = 0;

  if (!(g_debug->config().options() & BACKTRACE)) {
    error_log("get_malloc_leak_info: Allocations not being tracked, to enable "
              "set the option 'backtrace'.");
    return;
  }

  g_debug->track->GetInfo(info, overall_size, info_size, total_memory, backtrace_size);
}

void debug_free_malloc_leak_info(uint8_t* info) {
  g_dispatch->free(info);
}

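// With a debug header, report the usable size stored in the header (checking
// the tag first); otherwise ask the underlying allocator.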
static size_t internal_malloc_usable_size(void* pointer) {
  if (g_debug->need_header()) {
    Header* header = g_debug->GetHeader(pointer);
    if (header->tag != DEBUG_TAG) {
      LogTagError(header, pointer, "malloc_usable_size");
      return 0;
    }

    return header->usable_size;
  } else {
    return g_dispatch->malloc_usable_size(pointer);
  }
}

size_t debug_malloc_usable_size(void* pointer) {
  if (DebugCallsDisabled() || pointer == nullptr) {
    return g_dispatch->malloc_usable_size(pointer);
  }
  ScopedDisableDebugCalls disable;

  return internal_malloc_usable_size(pointer);
}

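// Allocate size bytes plus room for the debug header and guards. When a
// header is needed, align the allocation so the header is placed correctly;
// then optionally fill the new memory with the configured pattern.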
static void* internal_malloc(size_t size) {
  if (size == 0) {
    size = 1;
  }

  size_t real_size = size + g_debug->extra_bytes();
  if (real_size < size) {
    // Overflow.
    errno = ENOMEM;
    return nullptr;
  }

  void* pointer;
  if (g_debug->need_header()) {
    if (size > Header::max_size()) {
      errno = ENOMEM;
      return nullptr;
    }

    Header* header = reinterpret_cast<Header*>(
        g_dispatch->memalign(MINIMUM_ALIGNMENT_BYTES, real_size));
    if (header == nullptr) {
      return nullptr;
    }
    pointer = InitHeader(header, header, size);
  } else {
    pointer = g_dispatch->malloc(real_size);
  }

  if (pointer != nullptr && (g_debug->config().options() & FILL_ON_ALLOC)) {
    size_t bytes = internal_malloc_usable_size(pointer);
    size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
    bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
    memset(pointer, g_debug->config().fill_alloc_value(), bytes);
  }
  return pointer;
}

void* debug_malloc(size_t size) {
  if (DebugCallsDisabled()) {
    return g_dispatch->malloc(size);
  }
  ScopedDisableDebugCalls disable;

  void* pointer = internal_malloc(size);

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(new MallocEntry(pointer, size));
  }

  return pointer;
}

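// Free an allocation: validate the header tag and guards, update the
// trackers, optionally fill the memory with the free pattern, and either
// hand the pointer to the free tracker or release it to the real allocator.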
static void internal_free(void* pointer) {
  void* free_pointer = pointer;
  size_t bytes;
  Header* header;
  if (g_debug->need_header()) {
    header = g_debug->GetHeader(pointer);
    if (header->tag != DEBUG_TAG) {
      LogTagError(header, pointer, "free");
      return;
    }
    free_pointer = header->orig_pointer;

    if (g_debug->config().options() & FRONT_GUARD) {
      if (!g_debug->front_guard->Valid(header)) {
        g_debug->front_guard->LogFailure(header);
      }
    }
    if (g_debug->config().options() & REAR_GUARD) {
      if (!g_debug->rear_guard->Valid(header)) {
        g_debug->rear_guard->LogFailure(header);
      }
    }

    if (g_debug->config().options() & TRACK_ALLOCS) {
      bool backtrace_found = false;
      if (g_debug->config().options() & BACKTRACE) {
        BacktraceHeader* back_header = g_debug->GetAllocBacktrace(header);
        backtrace_found = back_header->num_frames > 0;
      }
      g_debug->track->Remove(header, backtrace_found);
    }
    header->tag = DEBUG_FREE_TAG;

    bytes = header->usable_size;
  } else {
    bytes = g_dispatch->malloc_usable_size(pointer);
  }

  if (g_debug->config().options() & FILL_ON_FREE) {
    size_t fill_bytes = g_debug->config().fill_on_free_bytes();
    bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
    memset(pointer, g_debug->config().fill_free_value(), bytes);
  }

  if (g_debug->config().options() & FREE_TRACK) {
    // Do not add the allocation until we are done modifying the pointer
    // itself. This avoids a race if a lot of threads are all doing
    // frees at the same time and we wind up trying to really free this
    // pointer from another thread, while still trying to free it in
    // this function.
    g_debug->free_track->Add(header);
  } else {
    g_dispatch->free(free_pointer);
  }
}

void debug_free(void* pointer) {
  if (DebugCallsDisabled() || pointer == nullptr) {
    return g_dispatch->free(pointer);
  }
  ScopedDisableDebugCalls disable;

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(new FreeEntry(pointer));
  }

  internal_free(pointer);
}

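// Return memory aligned to at least `alignment` bytes. When a debug header is
// needed, over-allocate with plain malloc and align the returned pointer by
// hand so the header can sit directly in front of it.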
void* debug_memalign(size_t alignment, size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_dispatch->memalign(alignment, bytes);
  }
  ScopedDisableDebugCalls disable;

  if (bytes == 0) {
    bytes = 1;
  }

  void* pointer;
  if (g_debug->need_header()) {
    if (bytes > Header::max_size()) {
      errno = ENOMEM;
      return nullptr;
    }

    // Make the alignment a power of two.
    if (!powerof2(alignment)) {
      alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }
    // Force the alignment to at least MINIMUM_ALIGNMENT_BYTES to guarantee
    // that the header is aligned properly.
    if (alignment < MINIMUM_ALIGNMENT_BYTES) {
      alignment = MINIMUM_ALIGNMENT_BYTES;
    }

    // We don't have any idea what the natural alignment of
    // the underlying native allocator is, so we always need to
    // over allocate.
    size_t real_size = alignment + bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }

    pointer = g_dispatch->malloc(real_size);
    if (pointer == nullptr) {
      return nullptr;
    }

    uintptr_t value = reinterpret_cast<uintptr_t>(pointer) + g_debug->pointer_offset();
    // Now align the pointer.
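    // (-value % alignment) is the number of bytes needed to reach the next
    // multiple of alignment (0 if value is already aligned).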
    value += (-value % alignment);

    Header* header = g_debug->GetHeader(reinterpret_cast<void*>(value));
    pointer = InitHeader(header, pointer, bytes);
  } else {
    size_t real_size = bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }
    pointer = g_dispatch->memalign(alignment, real_size);
  }

  if (pointer != nullptr && (g_debug->config().options() & FILL_ON_ALLOC)) {
    size_t bytes = internal_malloc_usable_size(pointer);
    size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
    bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
    memset(pointer, g_debug->config().fill_alloc_value(), bytes);
  }

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(new MemalignEntry(pointer, bytes, alignment));
  }

  return pointer;
}

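// Resize an allocation. With a debug header, a shrink keeps the existing
// allocation (only the bookkeeping changes) and a grow is implemented as
// malloc + copy + free; without a header, defer to the real realloc.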
void* debug_realloc(void* pointer, size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_dispatch->realloc(pointer, bytes);
  }
  ScopedDisableDebugCalls disable;

  if (pointer == nullptr) {
    pointer = internal_malloc(bytes);
    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(new ReallocEntry(pointer, bytes, nullptr));
    }
    return pointer;
  }

  if (bytes == 0) {
    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(new ReallocEntry(nullptr, bytes, pointer));
    }

    internal_free(pointer);
    return nullptr;
  }

  size_t real_size = bytes;
  if (g_debug->config().options() & EXPAND_ALLOC) {
    real_size += g_debug->config().expand_alloc_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }
  }

  void* new_pointer;
  size_t prev_size;
  if (g_debug->need_header()) {
    if (bytes > Header::max_size()) {
      errno = ENOMEM;
      return nullptr;
    }

    Header* header = g_debug->GetHeader(pointer);
    if (header->tag != DEBUG_TAG) {
      LogTagError(header, pointer, "realloc");
      return nullptr;
    }

    // Same size, do nothing.
    if (real_size == header->real_size()) {
      // Do not bother recording, this is essentially a nop.
      return pointer;
    }

    // Allocation is shrinking.
    if (real_size < header->usable_size) {
      header->size = real_size;
      if (*g_malloc_zygote_child) {
        header->set_zygote();
      }
      if (g_debug->config().options() & REAR_GUARD) {
        // Don't bother allocating a smaller pointer in this case, simply
        // change the header usable_size and reset the rear guard.
        header->usable_size = header->real_size();
        memset(g_debug->GetRearGuard(header), g_debug->config().rear_guard_value(),
               g_debug->config().rear_guard_bytes());
      }
      // Do not bother recording, this is essentially a nop.
      return pointer;
    }

    // Allocate the new size.
    new_pointer = internal_malloc(bytes);
    if (new_pointer == nullptr) {
      errno = ENOMEM;
      return nullptr;
    }

    prev_size = header->usable_size;
    memcpy(new_pointer, pointer, prev_size);
    internal_free(pointer);
  } else {
    prev_size = g_dispatch->malloc_usable_size(pointer);
    new_pointer = g_dispatch->realloc(pointer, real_size);
    if (new_pointer == nullptr) {
      return nullptr;
    }
  }

  if (g_debug->config().options() & FILL_ON_ALLOC) {
    size_t bytes = internal_malloc_usable_size(new_pointer);
    if (bytes > g_debug->config().fill_on_alloc_bytes()) {
      bytes = g_debug->config().fill_on_alloc_bytes();
    }
    if (bytes > prev_size) {
      memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(new_pointer) + prev_size),
             g_debug->config().fill_alloc_value(), bytes - prev_size);
    }
  }

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(new ReallocEntry(new_pointer, bytes, pointer));
  }

  return new_pointer;
}

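// Zero-filled allocation with explicit overflow checks on both the
// nmemb * bytes multiplication and the extra debug bytes.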
void* debug_calloc(size_t nmemb, size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_dispatch->calloc(nmemb, bytes);
  }
  ScopedDisableDebugCalls disable;

  size_t size;
  if (__builtin_mul_overflow(nmemb, bytes, &size)) {
    // Overflow.
    errno = ENOMEM;
    return nullptr;
  }

  if (size == 0) {
    size = 1;
  }

  size_t real_size;
  if (__builtin_add_overflow(size, g_debug->extra_bytes(), &real_size)) {
    // Overflow.
    errno = ENOMEM;
    return nullptr;
  }

  void* pointer;
  if (g_debug->need_header()) {
    // The checks above guarantee the size computations did not overflow.
    if (size > Header::max_size()) {
      errno = ENOMEM;
      return nullptr;
    }

    // Need to guarantee the alignment of the header.
    Header* header = reinterpret_cast<Header*>(
        g_dispatch->memalign(MINIMUM_ALIGNMENT_BYTES, real_size));
    if (header == nullptr) {
      return nullptr;
    }
    memset(header, 0, g_dispatch->malloc_usable_size(header));
    pointer = InitHeader(header, header, size);
  } else {
    pointer = g_dispatch->calloc(1, real_size);
  }
  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(new CallocEntry(pointer, bytes, nmemb));
  }
  return pointer;
}

struct mallinfo debug_mallinfo() {
  return g_dispatch->mallinfo();
}

int debug_mallopt(int param, int value) {
  return g_dispatch->mallopt(param, value);
}

int debug_posix_memalign(void** memptr, size_t alignment, size_t size) {
  if (DebugCallsDisabled()) {
    return g_dispatch->posix_memalign(memptr, alignment, size);
  }

  if (!powerof2(alignment)) {
    return EINVAL;
  }
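  // Preserve errno: posix_memalign reports failure through its return value,
  // so any errno changes made by debug_memalign should not leak to the caller.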
  int saved_errno = errno;
  *memptr = debug_memalign(alignment, size);
  errno = saved_errno;
  return (*memptr != nullptr) ? 0 : ENOMEM;
}

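// Walk every allocation known to the underlying allocator. For allocations
// that carry a debug header and are tracked, report only the caller-visible
// body; otherwise report the raw allocation.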
int debug_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void* arg), void* arg) {
  // This can be called while malloc is disabled, so it must not allocate.
  // Capture the arguments manually and pass them to the lambda below as the
  // void* arg.
  struct iterate_ctx {
    decltype(callback) callback;
    decltype(arg) arg;
  } ctx = { callback, arg };

  return g_dispatch->iterate(base, size,
      [](uintptr_t base, size_t size, void* arg) {
        const iterate_ctx* ctx = reinterpret_cast<iterate_ctx*>(arg);
        const void* pointer = reinterpret_cast<void*>(base);
        if (g_debug->need_header()) {
          const Header* header = reinterpret_cast<const Header*>(pointer);
          if (g_debug->config().options() & TRACK_ALLOCS) {
            if (g_debug->track->Contains(header)) {
              // Return just the body of the allocation if we're sure the header exists.
              ctx->callback(reinterpret_cast<uintptr_t>(g_debug->GetPointer(header)),
                            header->usable_size, ctx->arg);
              return;
            }
          }
        }
        // Fall back to returning the whole allocation.
        ctx->callback(base, size, ctx->arg);
      }, &ctx);
}

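// Lock the underlying allocator and the allocation tracker so no allocations
// can proceed until debug_malloc_enable() is called.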
void debug_malloc_disable() {
  g_dispatch->malloc_disable();
  if (g_debug->track) {
    g_debug->track->PrepareFork();
  }
}

void debug_malloc_enable() {
  if (g_debug->track) {
    g_debug->track->PostForkParent();
  }
  g_dispatch->malloc_enable();
}

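// Copy up to frame_count frames of the backtrace recorded when pointer was
// allocated; returns the number of frames copied, or 0 if none is available.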
ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count) {
  if (DebugCallsDisabled() || pointer == nullptr) {
    return 0;
  }
  ScopedDisableDebugCalls disable;

  if (g_debug->need_header()) {
    Header* header;
    if (g_debug->config().options() & TRACK_ALLOCS) {
      header = g_debug->GetHeader(pointer);
      if (!g_debug->track->Contains(header)) {
        return 0;
      }
    } else {
      header = reinterpret_cast<Header*>(pointer);
    }
    if (header->tag != DEBUG_TAG) {
      return 0;
    }
    if (g_debug->config().options() & BACKTRACE) {
      BacktraceHeader* back_header = g_debug->GetAllocBacktrace(header);
      if (back_header->num_frames > 0) {
        if (frame_count > back_header->num_frames) {
          frame_count = back_header->num_frames;
        }
        memcpy(frames, &back_header->frames[0], frame_count * sizeof(uintptr_t));
        return frame_count;
      }
    }
  }

  return 0;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* debug_pvalloc(size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_dispatch->pvalloc(bytes);
  }

  size_t pagesize = getpagesize();
  size_t size = BIONIC_ALIGN(bytes, pagesize);
  if (size < bytes) {
    // Overflow.
    errno = ENOMEM;
    return nullptr;
  }
  return debug_memalign(pagesize, size);
}

void* debug_valloc(size_t size) {
  if (DebugCallsDisabled()) {
    return g_dispatch->valloc(size);
  }
  return debug_memalign(getpagesize(), size);
}
#endif