1 #define _GNU_SOURCE
2 #include <stdlib.h>
3 #include <limits.h>
4 #include <stdint.h>
5 #include <errno.h>
6 #include <sys/mman.h>
7 #include <sys/prctl.h>
8 #include "libc.h"
9 #include "atomic.h"
10 #include "pthread_impl.h"
11 #include "malloc_impl.h"
12 #include "malloc_random.h"
13 
14 #ifdef USE_JEMALLOC
15 #include <malloc.h>
16 extern void* je_malloc(size_t size);
17 extern void* je_calloc(size_t count, size_t size);
18 extern void* je_realloc(void* p, size_t newsize);
19 extern void je_free(void* p);
20 #ifdef USE_JEMALLOC_DFX_INTF
21 extern void je_malloc_disable();
22 extern void je_malloc_enable();
23 extern int je_iterate(uintptr_t base, size_t size,
24 	void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);
25 extern int je_mallopt(int param, int value);
26 #endif
27 #endif
28 
29 #if defined(__GNUC__) && defined(__PIC__)
30 #define inline inline __attribute__((always_inline))
31 #endif
32 
33 #ifdef HOOK_ENABLE
34 void *__libc_malloc(size_t);
35 void __libc_free(void *p);
36 #endif
37 
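/*
 * Global allocator state. binmap is a 64-bit mask with one bit per bin,
 * set while that bin is (probably) non-empty; bins[] are doubly-linked
 * freelists of free chunks grouped by size class, each with its own lock.
 * The optional members back the iterate/stats API (occupied_bins), the
 * freelist quarantine (delayed reuse of small freed chunks), and the
 * red-zone poisoning bookkeeping.
 */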
38 static struct {
39 	volatile uint64_t binmap;
40 	struct bin bins[BINS_COUNT];
41 	volatile int free_lock[2];
42 #ifdef MUSL_ITERATE_AND_STATS_API
43 	occupied_bin_t occupied_bins[OCCUPIED_BIN_COUNT];
44 #endif
45 #ifdef MALLOC_FREELIST_QUARANTINE
46 	struct bin quarantine[QUARANTINE_NUM];
47 	size_t quarantined_count[QUARANTINE_NUM];
48 	size_t quarantined_size[QUARANTINE_NUM];
49 #ifdef MALLOC_RED_ZONE
50 	char poison[BINS_COUNT];
51 	volatile int poison_lock[2];
52 	int poison_count_down;
53 #endif
54 #endif
55 } mal;
56 
57 int __malloc_replaced;
58 
59 #ifdef MUSL_ITERATE_AND_STATS_API
60 
61 /* Usable memory only, excluding overhead for chunks */
62 size_t total_heap_space = 0;
63 volatile int total_heap_space_inc_lock[2];
64 
65 
66 size_t __get_total_heap_space(void) {
67 	return total_heap_space;
68 }
69 
70 static void occupied_bin_destructor(void *occupied_bin)
71 {
72 	internal_free(occupied_bin);
73 }
74 
75 occupied_bin_t *__get_occupied_bin_by_idx(size_t bin_index)
76 {
77 	return &mal.occupied_bins[bin_index];
78 }
79 
80 static inline size_t get_occupied_bin_index(int thread_id)
81 {
82 	return (size_t) ((size_t)thread_id % OCCUPIED_BIN_COUNT);
83 }
84 
85 occupied_bin_t *__get_occupied_bin(struct chunk *c)
86 {
87 	size_t bin_index = get_occupied_bin_index(c->thread_id);
88 	return __get_occupied_bin_by_idx(bin_index);
89 }
90 
91 occupied_bin_t *__get_current_occupied_bin()
92 {
93 	size_t bin_index = get_occupied_bin_index(__pthread_self()->tid);
94 	return &mal.occupied_bins[bin_index];
95 }
96 #endif
97 
98 /* Synchronization tools */
99 
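/*
 * With MALLOC_FREELIST_HARDENED, freelist pointers are stored XOR-encoded
 * with a per-bin key so that a corrupted or attacker-controlled pointer
 * cannot be dereferenced directly; without hardening this is an identity
 * mapping.
 */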
100 static inline struct chunk *encode_chunk(struct chunk *ptr, void *key)
101 {
102 #ifndef MALLOC_FREELIST_HARDENED
103 	(void)key;
104 	return ptr;
105 #else
106 	return (struct chunk *)encode_ptr(ptr, key);
107 #endif
108 }
109 
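/*
 * Two-word lock: lk[0] is the lock word, lk[1] counts waiters. Locking is
 * skipped entirely while the process is still single-threaded
 * (libc.threads_minus_1 == 0).
 */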
110 static inline void lock(volatile int *lk)
111 {
112 	if (libc.threads_minus_1)
113 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
114 }
115 
116 static inline void unlock(volatile int *lk)
117 {
118 	if (lk[0]) {
119 		a_store(lk, 0);
120 		if (lk[1]) __wake(lk, 1, 1);
121 	}
122 }
123 
124 #ifdef MUSL_ITERATE_AND_STATS_API
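/*
 * Track every live allocation on a per-bucket "occupied" list (the bucket
 * is chosen from the allocating thread's tid) so that malloc_iterate()
 * and the statistics API can walk all in-use chunks.
 */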
125 void __push_chunk(struct chunk *c)
126 {
127 	c->prev_occupied = c->next_occupied = NULL;
128 	c->thread_id = 0;
129 
130 	occupied_bin_t *occupied_bin = __get_current_occupied_bin();
131 	lock(occupied_bin->lock);
132 
133 	if (occupied_bin->head != NULL) {
134 		occupied_bin->head->prev_occupied = c;
135 		c->next_occupied = occupied_bin->head;
136 	} else {
137 		occupied_bin->tail = c;
138 	}
139 	occupied_bin->head = c;
140 	c->thread_id = __pthread_self()->tid;
141 
142 	unlock(occupied_bin->lock);
143 }
144 
145 void __pop_chunk(struct chunk *c)
146 {
147 	if (!c->thread_id) {
148 		return;
149 	}
150 	occupied_bin_t *occupied_bin = __get_occupied_bin(c);
151 	lock(occupied_bin->lock);
152 
153 	if (c == occupied_bin->head) {
154 		occupied_bin->head = c->next_occupied;
155 	} else {
156 		c->prev_occupied->next_occupied = c->next_occupied;
157 	}
158 	if (c == occupied_bin->tail) {
159 		occupied_bin->tail = c->prev_occupied;
160 	} else {
161 		c->next_occupied->prev_occupied = c->prev_occupied;
162 	}
163 	unlock(occupied_bin->lock);
164 }
165 #endif
166 
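/*
 * malloc_disable()/malloc_enable() take or release every allocator lock
 * (or delegate to jemalloc's DFX interface), typically so the heap can be
 * walked safely, e.g. by malloc_iterate(), while no allocation is in
 * flight.
 */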
167 void malloc_disable(void)
168 {
169 #ifdef USE_JEMALLOC_DFX_INTF
170 	je_malloc_disable();
171 #elif defined(MUSL_ITERATE_AND_STATS_API)
172 	lock(mal.free_lock);
173 	lock(total_heap_space_inc_lock);
174 	for (size_t i = 0; i < BINS_COUNT; ++i) {
175 		lock(mal.bins[i].lock);
176 	}
177 	for (size_t i = 0; i < OCCUPIED_BIN_COUNT; ++i) {
178 		lock(mal.occupied_bins[i].lock);
179 	}
180 #endif
181 }
182 
183 void malloc_enable(void)
184 {
185 #ifdef USE_JEMALLOC_DFX_INTF
186 	je_malloc_enable();
187 #elif defined(MUSL_ITERATE_AND_STATS_API)
188 	for (size_t i = 0; i < OCCUPIED_BIN_COUNT; ++i) {
189 		unlock(mal.occupied_bins[i].lock);
190 	}
191 	for (size_t i = 0; i < BINS_COUNT; ++i) {
192 		unlock(mal.bins[i].lock);
193 	}
194 	unlock(total_heap_space_inc_lock);
195 	unlock(mal.free_lock);
196 #endif
197 }
198 
199 #ifdef MUSL_ITERATE_AND_STATS_API
200 typedef struct iterate_info_s {
201 	uintptr_t start_ptr;
202 	uintptr_t end_ptr;
203 	malloc_iterate_callback callback;
204 	void *arg;
205 } iterate_info_t;
206 
207 static void malloc_iterate_visitor(void *block, size_t block_size, void *arg)
208 {
209 	iterate_info_t *iterate_info = (iterate_info_t *)arg;
210 	if ((uintptr_t)block >= iterate_info->start_ptr && (uintptr_t)block < iterate_info->end_ptr) {
211 		iterate_info->callback(block, block_size, iterate_info->arg);
212 	}
213 }
214 
215 static void malloc_iterate_occupied_bin(occupied_bin_t *occupied_bin, iterate_info_t *iterate_info)
216 {
217 	for (struct chunk *c = occupied_bin->head; c != NULL; c = c->next_occupied) {
218 		malloc_iterate_visitor(CHUNK_TO_MEM(c), CHUNK_SIZE(c) - OVERHEAD, iterate_info);
219 	}
220 }
221 #endif
222 
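/*
 * malloc_iterate() invokes the callback for every in-use allocation whose
 * user pointer lies in [base, base+size), passing the usable size
 * (chunk size minus OVERHEAD). With jemalloc it forwards to je_iterate();
 * with MUSL_ITERATE_AND_STATS_API it walks the occupied bins; otherwise
 * it is a no-op returning 0. Illustrative sketch only; the callback name
 * and the heap_base/heap_len variables are arbitrary placeholders:
 *
 *     static void count_cb(void *blk, size_t sz, void *arg)
 *     {
 *         *(size_t *)arg += sz;
 *     }
 *     ...
 *     size_t used = 0;
 *     malloc_disable();
 *     malloc_iterate(heap_base, heap_len, count_cb, &used);
 *     malloc_enable();
 */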
223 int malloc_iterate(void* base, size_t size, void (*callback)(void* base, size_t size, void* arg), void* arg)
224 {
225 #ifdef USE_JEMALLOC_DFX_INTF
226 	return je_iterate(base, size, callback, arg);
227 #elif defined(MUSL_ITERATE_AND_STATS_API)
228 	uintptr_t ptr = (uintptr_t)base;
229 	uintptr_t end_ptr = ptr + size;
230 	iterate_info_t iterate_info = {ptr, end_ptr, callback, arg};
231 
232 	for (size_t i = 0; i < OCCUPIED_BIN_COUNT; ++i) {
233 		occupied_bin_t *occupied_bin = &mal.occupied_bins[i];
234 		malloc_iterate_occupied_bin(occupied_bin, &iterate_info);
235 	}
236 
237 #endif
238 	return 0;
239 }
240 
241 static inline void lock_bin(int i)
242 {
243 	lock(mal.bins[i].lock);
244 	if (!mal.bins[i].head) {
245 #ifdef MALLOC_FREELIST_HARDENED
246 		mal.bins[i].key = next_key();
247 		mal.bins[i].head = mal.bins[i].tail = encode_chunk(BIN_TO_CHUNK(i), mal.bins[i].key);
248 #else
249 		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
250 #endif
251 	}
252 }
253 
254 static inline void unlock_bin(int i)
255 {
256 	unlock(mal.bins[i].lock);
257 }
258 
259 #ifdef MALLOC_FREELIST_QUARANTINE
260 static inline void lock_quarantine(int i)
261 {
262 	lock(mal.quarantine[i].lock);
263 	if (!mal.quarantine[i].head) {
264 		mal.quarantine[i].key = next_key();
265 		mal.quarantine[i].head = mal.quarantine[i].tail = encode_chunk(QUARANTINE_TO_CHUNK(i), mal.quarantine[i].key);
266 	}
267 }
268 
269 static inline void unlock_quarantine(int i)
270 {
271 	unlock(mal.quarantine[i].lock);
272 }
273 #endif
274 
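/* Index of the lowest set bit: a_ctz_64 normally, with a de Bruijn
 * multiply fallback kept in the disabled branch below. */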
275 static int first_set(uint64_t x)
276 {
277 #if 1
278 	return a_ctz_64(x);
279 #else
280 	static const char debruijn64[64] = {
281 		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
282 		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
283 		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
284 		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
285 	};
286 	static const char debruijn32[32] = {
287 		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
288 		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
289 	};
290 	if (sizeof(long) < 8) {
291 		uint32_t y = x;
292 		if (!y) {
293 			y = x>>32;
294 			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
295 		}
296 		return debruijn32[(y&-y)*0x076be629 >> 27];
297 	}
298 	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
299 #endif
300 }
301 
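/*
 * Size-class mapping: sizes are first expressed in SIZE_ALIGN units minus
 * one. The first 33 classes are exact; larger sizes map through bin_tab
 * into logarithmically spaced bins, and anything above 0x1c00 units lands
 * in bin 63. bin_index() rounds down (used for free chunks),
 * bin_index_up() rounds up (used for allocation requests).
 */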
302 static const unsigned char bin_tab[60] = {
303 	            32,33,34,35,36,36,37,37,38,38,39,39,
304 	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
305 	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
306 	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
307 };
308 
309 static int bin_index(size_t x)
310 {
311 	x = x / SIZE_ALIGN - 1;
312 	if (x <= 32) return x;
313 	if (x < 512) return bin_tab[x/8-4];
314 	if (x > 0x1c00) return 63;
315 	return bin_tab[x/128-4] + 16;
316 }
317 
318 static int bin_index_up(size_t x)
319 {
320 	x = x / SIZE_ALIGN - 1;
321 	if (x <= 32) return x;
322 	x--;
323 	if (x < 512) return bin_tab[x/8-4] + 1;
324 	return bin_tab[x/128-4] + 17;
325 }
326 
327 #ifdef MALLOC_RED_ZONE
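/*
 * Red-zone support: the unused tail of a chunk (between usize and the
 * chunk end) is periodically filled with a per-bin poison byte, and a
 * small checksum of the header fields is folded into the state word, so
 * that header tampering or out-of-bounds writes can be detected on the
 * next operation that touches the chunk.
 */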
328 static inline size_t chunk_checksum_calculate(struct chunk *c)
329 {
330 	return (((size_t)c) ^ c->csize ^ c->usize ^ (c->state & M_STATE_MASK)) << M_CHECKSUM_SHIFT;
331 }
332 
333 void chunk_checksum_set(struct chunk *c)
334 {
335 	c->state = (c->state & M_STATE_MASK) | chunk_checksum_calculate(c);
336 }
337 
338 int chunk_checksum_check(struct chunk *c)
339 {
340 	return (c->state & ~M_STATE_MASK) ^ chunk_checksum_calculate(c);
341 }
342 
343 static inline char get_poison(int i)
344 {
345 	char poison = 0;
346 	lock(mal.poison_lock);
347 	if (!mal.poison[i]) {
348 		mal.poison[i] = (char)(uintptr_t)next_key();
349 	}
350 	poison = mal.poison[i];
351 	unlock(mal.poison_lock);
352 	return poison;
353 }
354 
355 static inline int need_poison(void)
356 {
357 	int ret = 0;
358 	lock(mal.poison_lock);
359 	if (mal.poison_count_down == 0) {
360 		/* Make sure the period is POISON_COUNT_DOWN_BASE */
361 		mal.poison_count_down = POISON_PERIOD - 1;
362 		ret = 1;
363 	} else {
364 		--mal.poison_count_down;
365 	}
366 	unlock(mal.poison_lock);
367 	return ret;
368 }
369 
370 static inline void chunk_poison_set(struct chunk *c)
371 {
372 	char * start = ((char *)CHUNK_TO_MEM(c)) + c->usize;
373 	size_t size = CHUNK_SIZE(c) - OVERHEAD - c->usize;
374 	char val = get_poison(bin_index(CHUNK_SIZE(c)));
375 	memset(start, val, size);
376 	c->state |= M_RZ_POISON;
377 }
378 
379 void chunk_poison_check(struct chunk *c)
380 {
381 	size_t csize = CHUNK_SIZE(c);
382 	char poison = get_poison(bin_index(csize));
383 	size_t padding_size = csize - OVERHEAD - c->usize;
384 	char *start = (char *)c + OVERHEAD + c->usize;
385 	for (size_t i = 0; i < padding_size; ++i) {
386 		/* Poison byte mismatch, crash */
387 		if (start[i] != poison) {
388 			a_crash();
389 		}
390 	}
391 }
392 #endif
393 
394 #if 0
395 void __dump_heap(int x)
396 {
397 	struct chunk *c;
398 	int i;
399 	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
400 		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
401 			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
402 			c->csize & 15,
403 			NEXT_CHUNK(c)->psize & 15);
404 	for (i=0; i<BINS_COUNT; i++) {
405 		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
406 			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
407 			if (!(mal.binmap & 1ULL<<i))
408 				fprintf(stderr, "missing from binmap!\n");
409 		} else if (mal.binmap & 1ULL<<i)
410 			fprintf(stderr, "binmap wrongly contains %d!\n", i);
411 	}
412 }
413 #endif
414 
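/*
 * Grow the heap via __expand_heap() (brk or mmap), install a zero-size
 * sentinel chunk when the new region is not contiguous with the old end,
 * and return the new chunk spanning the added space. total_heap_space
 * tracks the usable bytes when the stats API is enabled.
 */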
415 static struct chunk *expand_heap(size_t n)
416 {
417 	static int heap_lock[2];
418 	static void *end;
419 	void *p;
420 	struct chunk *w;
421 
422 	/* The argument n already accounts for the caller's chunk
423 	 * overhead needs, but if the heap can't be extended in-place,
424 	 * we need room for an extra zero-sized sentinel chunk. */
425 	n += SIZE_ALIGN;
426 
427 	lock(heap_lock);
428 
429 #ifdef MUSL_ITERATE_AND_STATS_API
430 	lock(total_heap_space_inc_lock);
431 #endif
432 
433 	p = __expand_heap(&n);
434 	if (!p) {
435 #ifdef MUSL_ITERATE_AND_STATS_API
436 		unlock(total_heap_space_inc_lock);
437 #endif
438 		unlock(heap_lock);
439 		return 0;
440 	}
441 
442 	/* If not just expanding existing space, we need to make a
443 	 * new sentinel chunk below the allocated space. */
444 	if (p != end) {
445 		/* Valid/safe because of the prologue increment. */
446 		n -= SIZE_ALIGN;
447 		p = (char *)p + SIZE_ALIGN;
448 		w = MEM_TO_CHUNK(p);
449 		w->psize = 0 | C_INUSE;
450 	}
451 
452 	/* Record new heap end and fill in footer. */
453 	end = (char *)p + n;
454 	w = MEM_TO_CHUNK(end);
455 	w->psize = n | C_INUSE;
456 	w->csize = 0 | C_INUSE;
457 
458 	/* Fill in header, which may be new or may be replacing a
459 	 * zero-size sentinel header at the old end-of-heap. */
460 	w = MEM_TO_CHUNK(p);
461 	w->csize = n | C_INUSE;
462 #ifdef MALLOC_RED_ZONE
463 	w->state = M_STATE_BRK;
464 	w->usize = POINTER_USAGE;
465 	chunk_checksum_set(w);
466 #endif
467 
468 #ifdef MUSL_ITERATE_AND_STATS_API
469 	total_heap_space += n - OVERHEAD;
470 	unlock(total_heap_space_inc_lock);
471 #endif
472 
473 	unlock(heap_lock);
474 
475 	return w;
476 }
477 
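/*
 * Convert a user request into an internal chunk size: add header/footer
 * OVERHEAD, round up to SIZE_ALIGN (reserving at least one red-zone byte
 * when enabled), and fail with ENOMEM if the result could overflow
 * ptrdiff_t arithmetic.
 */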
478 static int adjust_size(size_t *n)
479 {
480 	/* Result of pointer difference must fit in ptrdiff_t. */
481 	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
482 		if (*n) {
483 			errno = ENOMEM;
484 			return -1;
485 		} else {
486 			*n = SIZE_ALIGN;
487 			return 0;
488 		}
489 	}
490 #ifdef MALLOC_RED_ZONE
491 	/*
492 	 * *n + OVERHEAD + SIZE_ALIGN + 1 - 1,
493 	 * to make sure there is at least 1 byte for the red zone
494 	 */
495 	*n = (*n + OVERHEAD + SIZE_ALIGN) & SIZE_MASK;
496 #else
497 	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
498 #endif
499 	return 0;
500 }
501 
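/*
 * Remove chunk c from bin i. The safe-unlink check crashes if the
 * neighbours' encoded pointers do not point back at c, and the bin's
 * binmap bit is cleared when the bin becomes empty.
 */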
502 static void unbin(struct chunk *c, int i)
503 {
504 #ifdef MALLOC_FREELIST_HARDENED
505 	void *key = mal.bins[i].key;
506 #else
507 	void *key = NULL;
508 #endif
509 	struct chunk *next = encode_chunk(c->next, key);
510 	struct chunk *prev = encode_chunk(c->prev, key);
511 	if (prev->next != encode_chunk(c, key) ||
512 		next->prev != encode_chunk(c, key)) {
513 		/* crash when the doubly-linked list is corrupt */
514 		a_crash();
515 	}
516 	if (c->prev == c->next)
517 		a_and_64(&mal.binmap, ~(1ULL<<i));
518 	prev->next = c->next;
519 	next->prev = c->prev;
520 	c->csize |= C_INUSE;
521 	NEXT_CHUNK(c)->psize |= C_INUSE;
522 }
523 
524 static int alloc_fwd(struct chunk *c)
525 {
526 	int i;
527 	size_t k;
528 	while (!((k=c->csize) & C_INUSE)) {
529 		i = bin_index(k);
530 		lock_bin(i);
531 		if (c->csize == k) {
532 			unbin(c, i);
533 			unlock_bin(i);
534 			return 1;
535 		}
536 		unlock_bin(i);
537 	}
538 	return 0;
539 }
540 
541 static int alloc_rev(struct chunk *c)
542 {
543 	int i;
544 	size_t k;
545 	while (!((k=c->psize) & C_INUSE)) {
546 		i = bin_index(k);
547 		lock_bin(i);
548 		if (c->psize == k) {
549 			unbin(PREV_CHUNK(c), i);
550 			unlock_bin(i);
551 			return 1;
552 		}
553 		unlock_bin(i);
554 	}
555 	return 0;
556 }
557 
558 
559 /* pretrim - trims a chunk _prior_ to removing it from its bin.
560  * Must be called with i as the ideal bin for size n, j the bin
561  * for the _free_ chunk self, and bin j locked. */
562 static int pretrim(struct chunk *self, size_t n, int i, int j)
563 {
564 	size_t n1;
565 	struct chunk *next, *split;
566 
567 	/* We cannot pretrim if it would require re-binning. */
568 	if (j < 40) return 0;
569 	if (j < i+3) {
570 		if (j != 63) return 0;
571 		n1 = CHUNK_SIZE(self);
572 		if (n1-n <= MMAP_THRESHOLD) return 0;
573 	} else {
574 		n1 = CHUNK_SIZE(self);
575 	}
576 	if (bin_index(n1-n) != j) return 0;
577 
578 	next = NEXT_CHUNK(self);
579 	split = (void *)((char *)self + n);
580 
581 #ifdef MALLOC_FREELIST_HARDENED
582 	void *key = mal.bins[j].key;
583 #else
584 	void *key = NULL;
585 #endif
586 	struct chunk *realprev = encode_chunk(self->prev, key);
587 	struct chunk *realnext = encode_chunk(self->next, key);
588 	split->prev = self->prev;
589 	split->next = self->next;
590 	realprev->next = encode_chunk(split, key);
591 	realnext->prev = encode_chunk(split, key);
592 	split->psize = n | C_INUSE;
593 	split->csize = n1-n;
594 	next->psize = n1-n;
595 	self->csize = n | C_INUSE;
596 #ifdef MALLOC_RED_ZONE
597 	/* keep the split's poison state in line with self to avoid extra poisoning */
598 	self->state &= ~M_RZ_POISON;
599 	split->state = M_STATE_BRK;
600 	split->usize = POINTER_USAGE;
601 	chunk_checksum_set(split);
602 #endif
603 	return 1;
604 }
605 
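/* Split off and free the tail of a chunk that is more than DONTCARE
 * bytes larger than the size actually needed. */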
606 static void trim(struct chunk *self, size_t n)
607 {
608 	size_t n1 = CHUNK_SIZE(self);
609 	struct chunk *next, *split;
610 
611 	if (n >= n1 - DONTCARE) return;
612 
613 	next = NEXT_CHUNK(self);
614 	split = (void *)((char *)self + n);
615 
616 	split->psize = n | C_INUSE;
617 	split->csize = n1-n | C_INUSE;
618 	next->psize = n1-n | C_INUSE;
619 	self->csize = n | C_INUSE;
620 #ifdef MALLOC_RED_ZONE
621 	/* Remove the poison tag because the chunk is being trimmed */
622 	split->state = M_STATE_BRK;
623 	split->usize = POINTER_USAGE;
624 	chunk_checksum_set(split);
625 #endif
626 
627 	__bin_chunk(split);
628 }
629 
630 #ifdef HOOK_ENABLE
631 void *__libc_malloc(size_t n)
632 #else
633 void *malloc(size_t n)
634 #endif
635 {
636 #ifdef USE_JEMALLOC
637 	return je_malloc(n);
638 #endif
639 	return internal_malloc(n);
640 }
641 
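/*
 * Core allocation path: requests above MMAP_THRESHOLD get their own
 * anonymous mapping (named "native_heap:musl" via prctl); smaller ones
 * take the head chunk of the lowest non-empty bin at least as large as
 * the request, expanding the heap when no bin qualifies, then trim any
 * excess back onto the freelists.
 */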
642 void *internal_malloc(size_t n)
643 {
644 	struct chunk *c;
645 	int i, j;
646 #ifdef MALLOC_RED_ZONE
647 	size_t user_size = n;
648 	char poison;
649 #endif
650 
651 	if (adjust_size(&n) < 0) return 0;
652 
653 	if (n > MMAP_THRESHOLD) {
654 		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
655 		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
656 			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
657 		if (base == (void *)-1) return 0;
658 
659 		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, base, len, "native_heap:musl");
660 
661 		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
662 		c->csize = len - (SIZE_ALIGN - OVERHEAD);
663 		c->psize = SIZE_ALIGN - OVERHEAD;
664 #ifdef MALLOC_RED_ZONE
665 		c->state = M_STATE_MMAP | M_STATE_USED;
666 		c->usize = user_size;
667 		if (need_poison()) {
668 			chunk_poison_set(c);
669 		}
670 		chunk_checksum_set(c);
671 #endif
672 #ifdef MUSL_ITERATE_AND_STATS_API
673 		__push_chunk(c);
674 #endif
675 		return CHUNK_TO_MEM(c);
676 	}
677 
678 	i = bin_index_up(n);
679 	for (;;) {
680 		uint64_t mask = mal.binmap & -(1ULL<<i);
681 		if (!mask) {
682 			c = expand_heap(n);
683 			if (!c) return 0;
684 			if (alloc_rev(c)) {
685 				struct chunk *x = c;
686 				c = PREV_CHUNK(c);
687 				NEXT_CHUNK(x)->psize = c->csize =
688 					x->csize + CHUNK_SIZE(c);
689 			}
690 			break;
691 		}
692 		j = first_set(mask);
693 		lock_bin(j);
694 #ifdef MALLOC_FREELIST_HARDENED
695 		void *key = mal.bins[j].key;
696 #else
697 		void *key = NULL;
698 #endif
699 		c = encode_chunk(mal.bins[j].head, key); /* Decode the head pointer */
700 		if (c != BIN_TO_CHUNK(j)) {
701 #ifdef MALLOC_RED_ZONE
702 			if (c->state & M_RZ_POISON) {
703 				chunk_poison_check(c);
704 			}
705 #endif
706 			if (!pretrim(c, n, i, j)) unbin(c, j);
707 			unlock_bin(j);
708 			break;
709 		}
710 		unlock_bin(j);
711 	}
712 
713 	/* Now patch up in case we over-allocated */
714 	trim(c, n);
715 
716 #ifdef MALLOC_RED_ZONE
717 	c->usize = user_size;
718 	c->state |= M_STATE_USED;
719 	if (need_poison()) {
720 		chunk_poison_set(c);
721 	} else {
722 		c->state &= ~M_RZ_POISON;
723 	}
724 	chunk_checksum_set(c);
725 #endif
726 #ifdef MUSL_ITERATE_AND_STATS_API
727 	__push_chunk(c);
728 #endif
729 	return CHUNK_TO_MEM(c);
730 }
731 
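/*
 * calloc helper: scan the allocation backwards a page at a time, clearing
 * only regions that actually contain nonzero data, and return the length
 * of the initial portion the caller still has to memset. Untouched heap
 * pages are already zero, so this avoids faulting them in.
 */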
732 static size_t mal0_clear(char *p, size_t pagesz, size_t n)
733 {
734 #ifdef __GNUC__
735 	typedef uint64_t __attribute__((__may_alias__)) T;
736 #else
737 	typedef unsigned char T;
738 #endif
739 	char *pp = p + n;
740 	size_t i = (uintptr_t)pp & (pagesz - 1);
741 	for (;;) {
742 		pp = memset(pp - i, 0, i);
743 		if (pp - p < pagesz) return pp - p;
744 		for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
745 		        if (((T *)pp)[-1] | ((T *)pp)[-2])
746 				break;
747 	}
748 }
749 
750 #ifdef HOOK_ENABLE
751 void *__libc_calloc(size_t m, size_t n)
752 #else
753 void *calloc(size_t m, size_t n)
754 #endif
755 {
756 #ifdef USE_JEMALLOC
757 	return je_calloc(m, n);
758 #endif
759 	if(n && m > (size_t)-1/n){
760 		errno=ENOMEM;
761 		return 0;
762 	}
763 	n *= m;
764 	void *p=malloc(n);
765 	if(!p) return p;
766 	if(!__malloc_replaced) {
767 		if(IS_MMAPPED(MEM_TO_CHUNK(p)))
768 			return p;
769 		if(n >= PAGE_SIZE)
770 			n = mal0_clear(p, PAGE_SIZE, n);
771 	}
772 	return memset(p, 0, n);
773 }
774 
775 void *internal_calloc(size_t m, size_t n)
776 {
777 	if (n && m > (size_t)-1/n) {
778 		errno = ENOMEM;
779 		return 0;
780 	}
781 	n *= m;
782 	void *p = internal_malloc(n);
783 	if (!p) return p;
784 	if (!__malloc_replaced) {
785 		if (IS_MMAPPED(MEM_TO_CHUNK(p)))
786 			return p;
787 		if (n >= PAGE_SIZE)
788 			n = mal0_clear(p, PAGE_SIZE, n);
789 	}
790 	return memset(p, 0, n);
791 }
792 
793 #ifdef HOOK_ENABLE
794 void *__libc_realloc(void *p, size_t n)
795 #else
796 void *realloc(void *p, size_t n)
797 #endif
798 {
799 #ifdef USE_JEMALLOC
800 	return je_realloc(p, n);
801 #endif
802 	struct chunk *self, *next;
803 	size_t n0, n1;
804 	void *new;
805 #ifdef MALLOC_RED_ZONE
806 	size_t user_size = n;
807 #endif
808 
809 	if (!p) return malloc(n);
810 	if (!n) {
811 		free(p);
812 		return NULL;
813 	}
814 
815 	if (adjust_size(&n) < 0) return 0;
816 
817 	self = MEM_TO_CHUNK(p);
818 	n1 = n0 = CHUNK_SIZE(self);
819 #ifdef MALLOC_RED_ZONE
820 	/* Not a valid chunk */
821 	if (!(self->state & M_STATE_USED)) a_crash();
822 	if (chunk_checksum_check(self)) a_crash();
823 	if (self->state & M_RZ_POISON) chunk_poison_check(self);
824 #endif
825 
826 	if (IS_MMAPPED(self)) {
827 		size_t extra = self->psize;
828 		char *base = (char *)self - extra;
829 		size_t oldlen = n0 + extra;
830 		size_t newlen = n + extra;
831 		/* Crash on realloc of freed chunk */
832 #ifdef MALLOC_RED_ZONE
833 		/* Wrong malloc type */
834 		if (!(self->state & M_STATE_MMAP)) a_crash();
835 #endif
836 		if (extra & 1) a_crash();
837 		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
838 			n0 = n;
839 			goto copy_free_ret;
840 		}
841 		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
842 		if (oldlen == newlen) return p;
843 #ifndef MUSL_ITERATE_AND_STATS_API
844 		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
845 #else
846 		base = __mremap(base, oldlen, newlen, 0);
847 #endif
848 		if (base == (void *)-1)
849 			goto copy_realloc;
850 #ifndef MUSL_ITERATE_AND_STATS_API
851 		self = (void *)(base + extra);
852 #endif
853 		self->csize = newlen - extra;
854 #ifdef MALLOC_RED_ZONE
855 		self->usize = user_size;
856 		if (need_poison()) {
857 			chunk_poison_set(self);
858 		} else {
859 			self->state &= ~M_RZ_POISON;
860 		}
861 		chunk_checksum_set(self);
862 #endif
863 		return CHUNK_TO_MEM(self);
864 	}
865 
866 	next = NEXT_CHUNK(self);
867 
868 	/* Crash on corrupted footer (likely from buffer overflow) */
869 	if (next->psize != self->csize) a_crash();
870 
871 	/* Merge adjacent chunks if we need more space. This is not
872 	 * a waste of time even if we fail to get enough space, because our
873 	 * subsequent call to free would otherwise have to do the merge. */
874 	if (n > n1 && alloc_fwd(next)) {
875 		n1 += CHUNK_SIZE(next);
876 		next = NEXT_CHUNK(next);
877 #ifdef MALLOC_RED_ZONE
878 		/* forward merge happened, remove the poison tag */
879 		self->state &= ~M_RZ_POISON;
880 #endif
881 	}
882 	/* FIXME: find what's wrong here and reenable it..? */
883 	if (0 && n > n1 && alloc_rev(self)) {
884 		self = PREV_CHUNK(self);
885 		n1 += CHUNK_SIZE(self);
886 	}
887 	self->csize = n1 | C_INUSE;
888 	next->psize = n1 | C_INUSE;
889 
890 	/* If we got enough space, split off the excess and return */
891 	if (n <= n1) {
892 		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
893 		trim(self, n);
894 #ifdef MALLOC_RED_ZONE
895 		self->usize = user_size;
896 		if (need_poison()) {
897 			chunk_poison_set(self);
898 		} else {
899 			self->state &= ~M_RZ_POISON;
900 		}
901 		chunk_checksum_set(self);
902 #endif
903 		return CHUNK_TO_MEM(self);
904 	}
905 
906 copy_realloc:
907 	/* As a last resort, allocate a new chunk and copy to it. */
908 	new = malloc(n-OVERHEAD);
909 	if (!new) return 0;
910 copy_free_ret:
911 #ifndef MALLOC_RED_ZONE
912 	memcpy(new, p, n0-OVERHEAD);
913 #else
914 	memcpy(new, p, self->usize < user_size ? self->usize : user_size);
915 	chunk_checksum_set(self);
916 #endif
917 	free(CHUNK_TO_MEM(self));
918 	return new;
919 }
920 
921 void *internal_realloc(void *p, size_t n)
922 {
923 	struct chunk *self, *next;
924 	size_t n0, n1;
925 	void *new;
926 #ifdef MALLOC_RED_ZONE
927 	size_t user_size = n;
928 #endif
929 
930 	if (!p) return internal_malloc(n);
931 	if (!n) {
932 		internal_free(p);
933 		return NULL;
934 	}
935 
936 	if (adjust_size(&n) < 0) return 0;
937 
938 	self = MEM_TO_CHUNK(p);
939 	n1 = n0 = CHUNK_SIZE(self);
940 #ifdef MALLOC_RED_ZONE
941 	/* Not a valid chunk */
942 	if (!(self->state & M_STATE_USED)) a_crash();
943 	if (chunk_checksum_check(self)) a_crash();
944 	if (self->state & M_RZ_POISON) chunk_poison_check(self);
945 #endif
946 
947 	if (IS_MMAPPED(self)) {
948 		size_t extra = self->psize;
949 		char *base = (char *)self - extra;
950 		size_t oldlen = n0 + extra;
951 		size_t newlen = n + extra;
952 		/* Crash on realloc of freed chunk */
953 #ifdef MALLOC_RED_ZONE
954 		/* Wrong malloc type */
955 		if (!(self->state & M_STATE_MMAP)) a_crash();
956 #endif
957 		if (extra & 1) a_crash();
958 		if (newlen < PAGE_SIZE && (new = internal_malloc(n-OVERHEAD))) {
959 			n0 = n;
960 			goto copy_free_ret;
961 		}
962 		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
963 		if (oldlen == newlen) return p;
964 		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
965 		if (base == (void *)-1)
966 			goto copy_realloc;
967 		self = (void *)(base + extra);
968 		self->csize = newlen - extra;
969 #ifdef MALLOC_RED_ZONE
970 		self->usize = user_size;
971 		if (need_poison()) {
972 			chunk_poison_set(self);
973 		} else {
974 			self->state &= ~M_RZ_POISON;
975 		}
976 		chunk_checksum_set(self);
977 #endif
978 		return CHUNK_TO_MEM(self);
979 	}
980 
981 	next = NEXT_CHUNK(self);
982 
983 	/* Crash on corrupted footer (likely from buffer overflow) */
984 	if (next->psize != self->csize) a_crash();
985 
986 	/* Merge adjacent chunks if we need more space. This is not
987 	 * a waste of time even if we fail to get enough space, because our
988 	 * subsequent call to free would otherwise have to do the merge. */
989 	if (n > n1 && alloc_fwd(next)) {
990 		n1 += CHUNK_SIZE(next);
991 		next = NEXT_CHUNK(next);
992 #ifdef MALLOC_RED_ZONE
993 		/* forward merge happened, remove the poison tag */
994 		self->state &= ~M_RZ_POISON;
995 #endif
996 	}
997 	/* FIXME: find what's wrong here and reenable it..? */
998 	if (0 && n > n1 && alloc_rev(self)) {
999 		self = PREV_CHUNK(self);
1000 		n1 += CHUNK_SIZE(self);
1001 	}
1002 	self->csize = n1 | C_INUSE;
1003 	next->psize = n1 | C_INUSE;
1004 
1005 	/* If we got enough space, split off the excess and return */
1006 	if (n <= n1) {
1007 		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
1008 		trim(self, n);
1009 #ifdef MALLOC_RED_ZONE
1010 		self->usize = user_size;
1011 		if (need_poison()) {
1012 			chunk_poison_set(self);
1013 		} else {
1014 			self->state &= ~M_RZ_POISON;
1015 		}
1016 		chunk_checksum_set(self);
1017 #endif
1018 		return CHUNK_TO_MEM(self);
1019 	}
1020 
1021 copy_realloc:
1022 	/* As a last resort, allocate a new chunk and copy to it. */
1023 	new = internal_malloc(n-OVERHEAD);
1024 	if (!new) return 0;
1025 copy_free_ret:
1026 #ifndef MALLOC_RED_ZONE
1027 	memcpy(new, p, n0-OVERHEAD);
1028 #else
1029 	memcpy(new, p, self->usize < user_size ? self->usize : user_size);
1030 	chunk_checksum_set(self);
1031 #endif
1032 	internal_free(CHUNK_TO_MEM(self));
1033 	return new;
1034 }
1035 
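/*
 * Return a chunk to the freelists: merge with free neighbours (re-checking
 * under the bin lock and free_lock), publish the bin in binmap, link the
 * chunk onto the bin's encoded doubly-linked list, and madvise away whole
 * pages in the middle of sufficiently large free spans.
 */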
1036 void __bin_chunk(struct chunk *self)
1037 {
1038 	struct chunk *next = NEXT_CHUNK(self);
1039 	size_t final_size, new_size, size;
1040 	int reclaim=0;
1041 	int i;
1042 
1043 	final_size = new_size = CHUNK_SIZE(self);
1044 
1045 	/* Crash on corrupted footer (likely from buffer overflow) */
1046 	if (next->psize != self->csize) a_crash();
1047 
1048 	for (;;) {
1049 		if (self->psize & next->csize & C_INUSE) {
1050 			self->csize = final_size | C_INUSE;
1051 			next->psize = final_size | C_INUSE;
1052 			i = bin_index(final_size);
1053 			lock_bin(i);
1054 			lock(mal.free_lock);
1055 			if (self->psize & next->csize & C_INUSE)
1056 				break;
1057 			unlock(mal.free_lock);
1058 			unlock_bin(i);
1059 		}
1060 
1061 		if (alloc_rev(self)) {
1062 			self = PREV_CHUNK(self);
1063 			size = CHUNK_SIZE(self);
1064 			final_size += size;
1065 			if (new_size+size > RECLAIM && (new_size+size^size) > size)
1066 				reclaim = 1;
1067 		}
1068 
1069 		if (alloc_fwd(next)) {
1070 			size = CHUNK_SIZE(next);
1071 			final_size += size;
1072 			if (new_size+size > RECLAIM && (new_size+size^size) > size)
1073 				reclaim = 1;
1074 			next = NEXT_CHUNK(next);
1075 		}
1076 #ifdef MALLOC_RED_ZONE
1077 		/* if a poisoned chunk gets merged, remove the poison tag */
1078 		self->state &= ~M_RZ_POISON;
1079 #endif
1080 	}
1081 
1082 	if (!(mal.binmap & 1ULL<<i))
1083 		a_or_64(&mal.binmap, 1ULL<<i);
1084 
1085 	self->csize = final_size;
1086 	next->psize = final_size;
1087 	unlock(mal.free_lock);
1088 
1089 #ifdef MALLOC_FREELIST_HARDENED
1090 	void *key = mal.bins[i].key;
1091 #else
1092 	void *key = NULL;
1093 #endif
1094 	self->next = encode_chunk(BIN_TO_CHUNK(i), key);
1095 	self->prev = mal.bins[i].tail;
1096 	struct chunk *xorptr = encode_ptr(self, key);
1097 	encode_chunk(mal.bins[i].tail, key)->next = xorptr;
1098 	mal.bins[i].tail = xorptr;
1099 
1100 	/* Replace middle of large chunks with fresh zero pages */
1101 	if (reclaim) {
1102 		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
1103 		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
1104 #if 1
1105 		__madvise((void *)a, b-a, MADV_DONTNEED);
1106 #else
1107 		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
1108 			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
1109 #endif
1110 #ifdef MALLOC_RED_ZONE
1111 		self->state &= ~M_RZ_POISON;
1112 #endif
1113 	}
1114 
1115 #ifdef MALLOC_RED_ZONE
1116 	chunk_checksum_set(self);
1117 #endif
1118 	unlock_bin(i);
1119 }
1120 
1121 static void unmap_chunk(struct chunk *self)
1122 {
1123 	size_t extra = self->psize;
1124 	char *base = (char *)self - extra;
1125 	size_t len = CHUNK_SIZE(self) + extra;
1126 #ifdef MALLOC_RED_ZONE
1127 	/* Wrong chunk type */
1128 	if (!(self->state & M_STATE_MMAP)) a_crash();
1129 #endif
1130 	/* Crash on double free */
1131 	if (extra & 1) a_crash();
1132 	__munmap(base, len);
1133 }
1134 
1135 #ifdef MALLOC_FREELIST_QUARANTINE
1136 static inline int quarantine_index(size_t size)
1137 {
1138 	return (size / SIZE_ALIGN) & (QUARANTINE_NUM - 1);
1139 }
1140 
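/* Crash if "self" is already present in its quarantine list (a double
 * free), verifying the encoded list links along the way. */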
1141 static void quarantine_contained(struct chunk *self)
1142 {
1143 	size_t size = CHUNK_SIZE(self);
1144 	int i = quarantine_index(size);
1145 	struct chunk *cur;
1146 	struct chunk *next;
1147 	struct chunk *prev;
1148 	void *key;
1149 
1150 #ifdef MALLOC_RED_ZONE
1151 	/* Wrong chunk type */
1152 	if (!(self->state & M_STATE_BRK)) a_crash();
1153 #endif
1154 
1155 	lock_quarantine(i);
1156 	key = mal.quarantine[i].key;
1157 	cur = encode_chunk(mal.quarantine[i].head, key);
1158 	prev = QUARANTINE_TO_CHUNK(i);
1159 	while (cur != QUARANTINE_TO_CHUNK(i)) {
1160 		/* "Safe-unlink" check */
1161 		next = encode_chunk(cur->next, key);
1162 		if (prev->next != encode_chunk(cur, key) ||
1163 			next->prev != encode_chunk(cur, key))
1164 		a_crash();
1165 		/* If "self" is already in the freelist, it is a double free: crash */
1166 		if (cur == self) a_crash();
1167 		prev = cur;
1168 		cur = next;
1169 	}
1170 	unlock_quarantine(i);
1171 }
1172 
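/*
 * Instead of binning a small freed chunk immediately, park it in one of
 * QUARANTINE_NUM quarantine lists (indexed by size). When a list exceeds
 * its size or count threshold it is detached and flushed into the regular
 * bins; chunks above QUARANTINE_THRESHOLD bypass the quarantine. This
 * delays reuse and makes use-after-free easier to detect.
 */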
1173 static void quarantine_bin(struct chunk *self)
1174 {
1175 	size_t size = CHUNK_SIZE(self);
1176 	struct chunk *cur;
1177 	struct chunk *next;
1178 	void *key;
1179 	int i;
1180 
1181 #ifdef MALLOC_RED_ZONE
1182 	self->state &= ~M_STATE_USED;
1183 #endif
1184 	/* Avoid quarantining large memory */
1185 	if (size > QUARANTINE_THRESHOLD) {
1186 #ifdef MALLOC_RED_ZONE
1187 		self->usize = POINTER_USAGE;
1188 		if ((self->psize & self->csize & NEXT_CHUNK(self)->csize & C_INUSE) &&
1189 			need_poison()) {
1190 			/* self is unlikely to be merged with its neighbours */
1191 			chunk_poison_set(self);
1192 		} else {
1193 			self->state &= ~M_RZ_POISON;
1194 		}
1195 		chunk_checksum_set(self);
1196 #endif
1197 		__bin_chunk(self);
1198 		return;
1199 	}
1200 
1201 	i = quarantine_index(size);
1202 	lock_quarantine(i);
1203 	key = mal.quarantine[i].key;
1204 	if (mal.quarantined_size[i] > QUARANTINE_THRESHOLD || mal.quarantined_count[i] > QUARANTINE_N_THRESHOLD) {
1205 		/* cherry-pick an independent list from quarantine */
1206 		cur = encode_chunk(mal.quarantine[i].head, key);
1207 		/* now clear the quarantine[i] */
1208 		mal.quarantine[i].head = mal.quarantine[i].tail = encode_chunk(QUARANTINE_TO_CHUNK(i), key);
1209 		mal.quarantined_size[i] = mal.quarantined_count[i] = 0;
1210 		unlock_quarantine(i);
1211 
1212 		/* Bin the quarantined chunks */
1213 		do {
1214 			next = encode_chunk(cur->next, key);
1215 #ifdef MALLOC_RED_ZONE
1216 			if (cur->state & M_RZ_POISON) chunk_poison_check(cur);
1217 #endif
1218 			__bin_chunk(cur);
1219 			cur = next;
1220 		} while (cur != QUARANTINE_TO_CHUNK(i));
1221 	} else {
1222 		unlock_quarantine(i);
1223 	}
1224 
1225 	lock_quarantine(i);
1226 	self->next = encode_chunk(QUARANTINE_TO_CHUNK(i), key);
1227 	self->prev = mal.quarantine[i].tail;
1228 	encode_chunk(mal.quarantine[i].tail, key)->next = encode_chunk(self, key);
1229 	mal.quarantine[i].tail = encode_chunk(self, key);
1230 
1231 	mal.quarantined_size[i] += size;
1232 	mal.quarantined_count[i] += 1;
1233 
1234 #ifdef MALLOC_RED_ZONE
1235 	if (need_poison()) {
1236 		self->usize = POINTER_USAGE;
1237 		chunk_poison_set(self);
1238 	} else {
1239 		self->state &= ~M_RZ_POISON;
1240 	}
1241 	chunk_checksum_set(self);
1242 #endif
1243 	unlock_quarantine(i);
1244 }
1245 #endif
1246 
1247 #ifdef HOOK_ENABLE
1248 void __libc_free(void *p)
1249 #else
1250 void free(void *p)
1251 #endif
1252 {
1253 #ifdef USE_JEMALLOC
1254 	return je_free(p);
1255 #endif
1256 	return internal_free(p);
1257 }
1258 
1259 void internal_free(void *p)
1260 {
1261 	if (!p) return;
1262 
1263 	struct chunk *self = MEM_TO_CHUNK(p);
1264 #ifdef MUSL_ITERATE_AND_STATS_API
1265 	__pop_chunk(self);
1266 #endif
1267 
1268 #ifdef MALLOC_RED_ZONE
1269 	/* This is not a valid chunk for freeing */
1270 	if (chunk_checksum_check(self)) a_crash();
1271 	if (!(self->state & M_STATE_USED)) a_crash();
1272 	if (self->state & M_RZ_POISON) chunk_poison_check(self);
1273 #endif
1274 	if (IS_MMAPPED(self))
1275 		unmap_chunk(self);
1276 	else {
1277 #ifdef MALLOC_FREELIST_QUARANTINE
1278 		quarantine_contained(self);
1279 		quarantine_bin(self);
1280 #else
1281 		__bin_chunk(self);
1282 #endif
1283 	}
1284 }
1285 
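/*
 * Donate the [start, end) region (e.g. leftover loader memory) to the
 * heap: align it, frame it with in-use sentinel headers, and free the
 * interior as a single chunk.
 */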
1286 void __malloc_donate(char *start, char *end)
1287 {
1288 #ifndef USE_JEMALLOC
1289 	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
1290 	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;
1291 
1292 	/* Getting past this condition ensures that the padding for alignment
1293 	 * and header overhead will not overflow and will leave a nonzero
1294 	 * multiple of SIZE_ALIGN bytes between start and end. */
1295 	if (end - start <= OVERHEAD + align_start_up + align_end_down)
1296 		return;
1297 	start += align_start_up + OVERHEAD;
1298 	end   -= align_end_down;
1299 
1300 	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
1301 	c->psize = n->csize = C_INUSE;
1302 	c->csize = n->psize = C_INUSE | (end-start);
1303 #ifdef MALLOC_RED_ZONE
1304 	c->usize = POINTER_USAGE;
1305 	c->state = M_STATE_BRK;
1306 	chunk_checksum_set(c);
1307 #endif
1308 #ifdef MUSL_ITERATE_AND_STATS_API
1309 	lock(total_heap_space_inc_lock);
1310 	total_heap_space += CHUNK_SIZE(c) - OVERHEAD;
1311 #endif
1312 	__bin_chunk(c);
1313 #ifdef MUSL_ITERATE_AND_STATS_API
1314 	unlock(total_heap_space_inc_lock);
1315 #endif
1316 #endif
1317 }
1318 
1319 #ifdef HOOK_ENABLE
1320 int __libc_mallopt(int param, int value)
1321 #else
1322 int mallopt(int param, int value)
1323 #endif
1324 {
1325 #ifdef USE_JEMALLOC_DFX_INTF
1326 	return je_mallopt(param, value);
1327 #endif
1328 	return 0;
1329 }
1330 
1331 ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count)
1332 {
1333 	return 0;
1334 }
1335