1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3
4 /******************************************************************************/
5 /* Data. */
6
7 /* Runtime configuration options. */
8 const char *je_malloc_conf JEMALLOC_ATTR(weak);
9 bool opt_abort =
10 #ifdef JEMALLOC_DEBUG
11 true
12 #else
13 false
14 #endif
15 ;
16 const char *opt_junk =
17 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
18 "true"
19 #else
20 "false"
21 #endif
22 ;
23 bool opt_junk_alloc =
24 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
25 true
26 #else
27 false
28 #endif
29 ;
30 bool opt_junk_free =
31 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
32 true
33 #else
34 false
35 #endif
36 ;
37
38 size_t opt_quarantine = ZU(0);
39 bool opt_redzone = false;
40 bool opt_utrace = false;
41 bool opt_xmalloc = false;
42 bool opt_zero = false;
43 unsigned opt_narenas = 0;
44
45 /* Initialized to true if the process is running inside Valgrind. */
46 bool in_valgrind;
47
48 unsigned ncpus;
49
50 /* Protects arenas initialization. */
51 static malloc_mutex_t arenas_lock;
52 /*
53 * Arenas that are used to service external requests. Not all elements of the
54 * arenas array are necessarily used; arenas are created lazily as needed.
55 *
56 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
57 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
58 * takes some action to create them and allocate from them.
59 */
60 arena_t **arenas;
61 static unsigned narenas_total; /* Use narenas_total_*(). */
62 static arena_t *a0; /* arenas[0]; read-only after initialization. */
63 static unsigned narenas_auto; /* Read-only after initialization. */
64
65 typedef enum {
66 malloc_init_uninitialized = 3,
67 malloc_init_a0_initialized = 2,
68 malloc_init_recursible = 1,
69 malloc_init_initialized = 0 /* Common case --> jnz. */
70 } malloc_init_t;
71 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
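/*
 * Bootstrapping walks these states downward: malloc_init_hard_a0_locked()
 * advances uninitialized --> a0_initialized, malloc_init_hard_recursible()
 * advances to recursible (with init_lock temporarily dropped), and
 * malloc_init_hard_finish() finally reaches initialized.  The enum values
 * are arranged so that the common-case test in malloc_initialized()
 * compares against zero.
 */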
72
73 /* False should be the common case. Set to true to trigger initialization. */
74 static bool malloc_slow = true;
75
76 /* When malloc_slow is true, set the corresponding bits for sanity check. */
77 enum {
78 flag_opt_junk_alloc = (1U),
79 flag_opt_junk_free = (1U << 1),
80 flag_opt_quarantine = (1U << 2),
81 flag_opt_zero = (1U << 3),
82 flag_opt_utrace = (1U << 4),
83 flag_in_valgrind = (1U << 5),
84 flag_opt_xmalloc = (1U << 6)
85 };
86 static uint8_t malloc_slow_flags;
87
88 /* Last entry for overflow detection only. */
89 JEMALLOC_ALIGNED(CACHELINE)
90 const size_t index2size_tab[NSIZES+1] = {
91 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
92 ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
93 SIZE_CLASSES
94 #undef SC
95 ZU(0)
96 };
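/*
 * Each SC() entry above evaluates to the class's usable size,
 * (1 << lg_grp) + (ndelta << lg_delta); e.g. a class with lg_grp=13,
 * ndelta=2, lg_delta=11 works out to 8KiB + 2*2KiB = 12KiB.  The trailing
 * ZU(0) exists only for the overflow detection noted above.
 */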
97
98 JEMALLOC_ALIGNED(CACHELINE)
99 const uint8_t size2index_tab[] = {
100 #if LG_TINY_MIN == 0
101 #warning "Dangerous LG_TINY_MIN"
102 #define S2B_0(i) i,
103 #elif LG_TINY_MIN == 1
104 #warning "Dangerous LG_TINY_MIN"
105 #define S2B_1(i) i,
106 #elif LG_TINY_MIN == 2
107 #warning "Dangerous LG_TINY_MIN"
108 #define S2B_2(i) i,
109 #elif LG_TINY_MIN == 3
110 #define S2B_3(i) i,
111 #elif LG_TINY_MIN == 4
112 #define S2B_4(i) i,
113 #elif LG_TINY_MIN == 5
114 #define S2B_5(i) i,
115 #elif LG_TINY_MIN == 6
116 #define S2B_6(i) i,
117 #elif LG_TINY_MIN == 7
118 #define S2B_7(i) i,
119 #elif LG_TINY_MIN == 8
120 #define S2B_8(i) i,
121 #elif LG_TINY_MIN == 9
122 #define S2B_9(i) i,
123 #elif LG_TINY_MIN == 10
124 #define S2B_10(i) i,
125 #elif LG_TINY_MIN == 11
126 #define S2B_11(i) i,
127 #else
128 #error "Unsupported LG_TINY_MIN"
129 #endif
130 #if LG_TINY_MIN < 1
131 #define S2B_1(i) S2B_0(i) S2B_0(i)
132 #endif
133 #if LG_TINY_MIN < 2
134 #define S2B_2(i) S2B_1(i) S2B_1(i)
135 #endif
136 #if LG_TINY_MIN < 3
137 #define S2B_3(i) S2B_2(i) S2B_2(i)
138 #endif
139 #if LG_TINY_MIN < 4
140 #define S2B_4(i) S2B_3(i) S2B_3(i)
141 #endif
142 #if LG_TINY_MIN < 5
143 #define S2B_5(i) S2B_4(i) S2B_4(i)
144 #endif
145 #if LG_TINY_MIN < 6
146 #define S2B_6(i) S2B_5(i) S2B_5(i)
147 #endif
148 #if LG_TINY_MIN < 7
149 #define S2B_7(i) S2B_6(i) S2B_6(i)
150 #endif
151 #if LG_TINY_MIN < 8
152 #define S2B_8(i) S2B_7(i) S2B_7(i)
153 #endif
154 #if LG_TINY_MIN < 9
155 #define S2B_9(i) S2B_8(i) S2B_8(i)
156 #endif
157 #if LG_TINY_MIN < 10
158 #define S2B_10(i) S2B_9(i) S2B_9(i)
159 #endif
160 #if LG_TINY_MIN < 11
161 #define S2B_11(i) S2B_10(i) S2B_10(i)
162 #endif
163 #define S2B_no(i)
164 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
165 S2B_##lg_delta_lookup(index)
166 SIZE_CLASSES
167 #undef S2B_3
168 #undef S2B_4
169 #undef S2B_5
170 #undef S2B_6
171 #undef S2B_7
172 #undef S2B_8
173 #undef S2B_9
174 #undef S2B_10
175 #undef S2B_11
176 #undef S2B_no
177 #undef SC
178 };
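/*
 * Each S2B_k(i) helper emits 2^(k - LG_TINY_MIN) copies of size class index
 * i, so every lookup-eligible class contributes one entry per
 * (1 << LG_TINY_MIN)-byte step it spans, while classes tagged "no"
 * contribute nothing.  The resulting table is intended to be indexed by
 * (size - 1) >> LG_TINY_MIN on the size2index() fast path.
 */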
179
180 #ifdef JEMALLOC_THREADED_INIT
181 /* Used to let the initializing thread recursively allocate. */
182 # define NO_INITIALIZER ((unsigned long)0)
183 # define INITIALIZER pthread_self()
184 # define IS_INITIALIZER (malloc_initializer == pthread_self())
185 static pthread_t malloc_initializer = NO_INITIALIZER;
186 #else
187 # define NO_INITIALIZER false
188 # define INITIALIZER true
189 # define IS_INITIALIZER malloc_initializer
190 static bool malloc_initializer = NO_INITIALIZER;
191 #endif
192
193 /* Used to avoid initialization races. */
194 #ifdef _WIN32
195 #if _WIN32_WINNT >= 0x0600
196 static malloc_mutex_t init_lock = SRWLOCK_INIT;
197 #else
198 static malloc_mutex_t init_lock;
199 static bool init_lock_initialized = false;
200
201 JEMALLOC_ATTR(constructor)
202 static void WINAPI
203 _init_init_lock(void)
204 {
205
206 	/* If another constructor in the same binary is using mallctl to
207 	 * e.g. set up chunk hooks, it may end up running before this one,
208 	 * and malloc_init_hard will crash trying to lock the uninitialized
209 	 * lock. So we force an initialization of the lock in
210 	 * malloc_init_hard as well. We don't worry about atomicity of the
211 	 * accesses to the init_lock_initialized boolean, since it really
212 	 * only matters early in the process creation, before any separate
213 	 * thread normally starts doing anything. */
214 if (!init_lock_initialized)
215 malloc_mutex_init(&init_lock);
216 init_lock_initialized = true;
217 }
218
219 #ifdef _MSC_VER
220 # pragma section(".CRT$XCU", read)
221 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
222 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
223 #endif
224 #endif
225 #else
226 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
227 #endif
228
229 typedef struct {
230 void *p; /* Input pointer (as in realloc(p, s)). */
231 size_t s; /* Request size. */
232 void *r; /* Result pointer. */
233 } malloc_utrace_t;
234
235 #ifdef JEMALLOC_UTRACE
236 # define UTRACE(a, b, c) do { \
237 if (unlikely(opt_utrace)) { \
238 int utrace_serrno = errno; \
239 malloc_utrace_t ut; \
240 ut.p = (a); \
241 ut.s = (b); \
242 ut.r = (c); \
243 utrace(&ut, sizeof(ut)); \
244 errno = utrace_serrno; \
245 } \
246 } while (0)
247 #else
248 # define UTRACE(a, b, c)
249 #endif
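/*
 * UTRACE(a, b, c) reports an allocation event as (input pointer, request
 * size, result pointer) via utrace() when opt_utrace is set, saving and
 * restoring errno around the call.  The entry points below use
 * UTRACE(0, size, ret) for malloc-style calls, UTRACE(ptr, size, ret) for
 * realloc, and UTRACE(ptr, 0, 0) for free.
 */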
250
251 /******************************************************************************/
252 /*
253 * Function prototypes for static functions that are referenced prior to
254 * definition.
255 */
256
257 static bool malloc_init_hard_a0(void);
258 static bool malloc_init_hard(void);
259
260 /******************************************************************************/
261 /*
262 * Begin miscellaneous support functions.
263 */
264
265 JEMALLOC_ALWAYS_INLINE_C bool
266 malloc_initialized(void)
267 {
268
269 return (malloc_init_state == malloc_init_initialized);
270 }
271
272 JEMALLOC_ALWAYS_INLINE_C void
273 malloc_thread_init(void)
274 {
275
276 /*
277 * TSD initialization can't be safely done as a side effect of
278 * deallocation, because it is possible for a thread to do nothing but
279 * deallocate its TLS data via free(), in which case writing to TLS
280 * would cause write-after-free memory corruption. The quarantine
281 * facility *only* gets used as a side effect of deallocation, so make
282 * a best effort attempt at initializing its TSD by hooking all
283 * allocation events.
284 */
285 if (config_fill && unlikely(opt_quarantine))
286 quarantine_alloc_hook();
287 }
288
289 JEMALLOC_ALWAYS_INLINE_C bool
290 malloc_init_a0(void)
291 {
292
293 if (unlikely(malloc_init_state == malloc_init_uninitialized))
294 return (malloc_init_hard_a0());
295 return (false);
296 }
297
298 JEMALLOC_ALWAYS_INLINE_C bool
299 malloc_init(void)
300 {
301
302 if (unlikely(!malloc_initialized()) && malloc_init_hard())
303 return (true);
304 malloc_thread_init();
305
306 return (false);
307 }
308
309 /*
310 * The a0*() functions are used instead of i[mcd]alloc() in situations that
311 * cannot tolerate TLS variable access.
312 */
313
314 static void *
315 a0ialloc(size_t size, bool zero, bool is_metadata)
316 {
317
318 if (unlikely(malloc_init_a0()))
319 return (NULL);
320
321 return (iallocztm(NULL, size, size2index(size), zero, false,
322 is_metadata, arena_get(0, false), true));
323 }
324
325 static void
326 a0idalloc(void *ptr, bool is_metadata)
327 {
328
329 idalloctm(NULL, ptr, false, is_metadata, true);
330 }
331
332 void *
333 a0malloc(size_t size)
334 {
335
336 return (a0ialloc(size, false, true));
337 }
338
339 void
340 a0dalloc(void *ptr)
341 {
342
343 a0idalloc(ptr, true);
344 }
345
346 /*
347  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
348 * situations that cannot tolerate TLS variable access (TLS allocation and very
349 * early internal data structure initialization).
350 */
351
352 void *
353 bootstrap_malloc(size_t size)
354 {
355
356 if (unlikely(size == 0))
357 size = 1;
358
359 return (a0ialloc(size, false, false));
360 }
361
362 void *
363 bootstrap_calloc(size_t num, size_t size)
364 {
365 size_t num_size;
366
367 num_size = num * size;
368 if (unlikely(num_size == 0)) {
369 assert(num == 0 || size == 0);
370 num_size = 1;
371 }
372
373 return (a0ialloc(num_size, true, false));
374 }
375
376 void
377 bootstrap_free(void *ptr)
378 {
379
380 if (unlikely(ptr == NULL))
381 return;
382
383 a0idalloc(ptr, false);
384 }
385
386 static void
387 arena_set(unsigned ind, arena_t *arena)
388 {
389
390 atomic_write_p((void **)&arenas[ind], arena);
391 }
392
393 static void
394 narenas_total_set(unsigned narenas)
395 {
396
397 atomic_write_u(&narenas_total, narenas);
398 }
399
400 static void
401 narenas_total_inc(void)
402 {
403
404 atomic_add_u(&narenas_total, 1);
405 }
406
407 unsigned
408 narenas_total_get(void)
409 {
410
411 return (atomic_read_u(&narenas_total));
412 }
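/*
 * Writers update arenas[] and narenas_total while holding arenas_lock, but
 * the atomic wrappers above are presumably what lets fast-path readers
 * (e.g. arena_get()) load consistent values without taking the lock.
 */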
413
414 /* Create a new arena and insert it into the arenas array at index ind. */
415 static arena_t *
416 arena_init_locked(unsigned ind)
417 {
418 arena_t *arena;
419
420 assert(ind <= narenas_total_get());
421 if (ind > MALLOCX_ARENA_MAX)
422 return (NULL);
423 if (ind == narenas_total_get())
424 narenas_total_inc();
425
426 /*
427 * Another thread may have already initialized arenas[ind] if it's an
428 * auto arena.
429 */
430 arena = arena_get(ind, false);
431 if (arena != NULL) {
432 assert(ind < narenas_auto);
433 return (arena);
434 }
435
436 /* Actually initialize the arena. */
437 arena = arena_new(ind);
438 arena_set(ind, arena);
439 return (arena);
440 }
441
442 arena_t *
443 arena_init(unsigned ind)
444 {
445 arena_t *arena;
446
447 malloc_mutex_lock(&arenas_lock);
448 arena = arena_init_locked(ind);
449 malloc_mutex_unlock(&arenas_lock);
450 return (arena);
451 }
452
453 static void
454 arena_bind(tsd_t *tsd, unsigned ind)
455 {
456 arena_t *arena;
457
458 arena = arena_get(ind, false);
459 arena_nthreads_inc(arena);
460
461 if (tsd_nominal(tsd))
462 tsd_arena_set(tsd, arena);
463 }
464
465 void
466 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
467 {
468 arena_t *oldarena, *newarena;
469
470 oldarena = arena_get(oldind, false);
471 newarena = arena_get(newind, false);
472 arena_nthreads_dec(oldarena);
473 arena_nthreads_inc(newarena);
474 tsd_arena_set(tsd, newarena);
475 }
476
477 static void
478 arena_unbind(tsd_t *tsd, unsigned ind)
479 {
480 arena_t *arena;
481
482 arena = arena_get(ind, false);
483 arena_nthreads_dec(arena);
484 tsd_arena_set(tsd, NULL);
485 }
486
487 arena_tdata_t *
488 arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
489 {
490 arena_tdata_t *tdata, *arenas_tdata_old;
491 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
492 unsigned narenas_tdata_old, i;
493 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
494 unsigned narenas_actual = narenas_total_get();
495
496 /*
497 * Dissociate old tdata array (and set up for deallocation upon return)
498 * if it's too small.
499 */
500 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
501 arenas_tdata_old = arenas_tdata;
502 narenas_tdata_old = narenas_tdata;
503 arenas_tdata = NULL;
504 narenas_tdata = 0;
505 tsd_arenas_tdata_set(tsd, arenas_tdata);
506 tsd_narenas_tdata_set(tsd, narenas_tdata);
507 } else {
508 arenas_tdata_old = NULL;
509 narenas_tdata_old = 0;
510 }
511
512 /* Allocate tdata array if it's missing. */
513 if (arenas_tdata == NULL) {
514 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
515 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
516
517 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
518 *arenas_tdata_bypassp = true;
519 arenas_tdata = (arena_tdata_t *)a0malloc(
520 sizeof(arena_tdata_t) * narenas_tdata);
521 *arenas_tdata_bypassp = false;
522 }
523 if (arenas_tdata == NULL) {
524 tdata = NULL;
525 goto label_return;
526 }
527 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
528 tsd_arenas_tdata_set(tsd, arenas_tdata);
529 tsd_narenas_tdata_set(tsd, narenas_tdata);
530 }
531
532 /*
533 * Copy to tdata array. It's possible that the actual number of arenas
534 * has increased since narenas_total_get() was called above, but that
535 * causes no correctness issues unless two threads concurrently execute
536 * the arenas.extend mallctl, which we trust mallctl synchronization to
537 * prevent.
538 */
539
540 /* Copy/initialize tickers. */
541 for (i = 0; i < narenas_actual; i++) {
542 if (i < narenas_tdata_old) {
543 ticker_copy(&arenas_tdata[i].decay_ticker,
544 &arenas_tdata_old[i].decay_ticker);
545 } else {
546 ticker_init(&arenas_tdata[i].decay_ticker,
547 DECAY_NTICKS_PER_UPDATE);
548 }
549 }
550 if (narenas_tdata > narenas_actual) {
551 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
552 * (narenas_tdata - narenas_actual));
553 }
554
555 /* Read the refreshed tdata array. */
556 tdata = &arenas_tdata[ind];
557 label_return:
558 if (arenas_tdata_old != NULL)
559 a0dalloc(arenas_tdata_old);
560 return (tdata);
561 }
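/*
 * Summary of the slow path above: if the thread's tdata array is missing or
 * too small to cover arena index ind, allocate a bigger one via a0malloc()
 * (with the bypass flag set to avoid re-entering this function), copy the
 * existing decay tickers over, zero any trailing entries, and free the old
 * array before returning.
 */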
562
563 /* Slow path, called only by arena_choose(). */
564 arena_t *
565 arena_choose_hard(tsd_t *tsd)
566 {
567 arena_t *ret;
568
569 if (narenas_auto > 1) {
570 unsigned i, choose, first_null;
571
572 choose = 0;
573 first_null = narenas_auto;
574 malloc_mutex_lock(&arenas_lock);
575 assert(arena_get(0, false) != NULL);
576 for (i = 1; i < narenas_auto; i++) {
577 if (arena_get(i, false) != NULL) {
578 /*
579 * Choose the first arena that has the lowest
580 * number of threads assigned to it.
581 */
582 if (arena_nthreads_get(arena_get(i, false)) <
583 arena_nthreads_get(arena_get(choose,
584 false)))
585 choose = i;
586 } else if (first_null == narenas_auto) {
587 /*
588 * Record the index of the first uninitialized
589 * arena, in case all extant arenas are in use.
590 *
591 * NB: It is possible for there to be
592 * discontinuities in terms of initialized
593 * versus uninitialized arenas, due to the
594 * "thread.arena" mallctl.
595 */
596 first_null = i;
597 }
598 }
599
600 if (arena_nthreads_get(arena_get(choose, false)) == 0
601 || first_null == narenas_auto) {
602 /*
603 * Use an unloaded arena, or the least loaded arena if
604 * all arenas are already initialized.
605 */
606 ret = arena_get(choose, false);
607 } else {
608 /* Initialize a new arena. */
609 choose = first_null;
610 ret = arena_init_locked(choose);
611 if (ret == NULL) {
612 malloc_mutex_unlock(&arenas_lock);
613 return (NULL);
614 }
615 }
616 arena_bind(tsd, choose);
617 malloc_mutex_unlock(&arenas_lock);
618 } else {
619 ret = arena_get(0, false);
620 arena_bind(tsd, 0);
621 }
622
623 return (ret);
624 }
625
626 void
627 thread_allocated_cleanup(tsd_t *tsd)
628 {
629
630 /* Do nothing. */
631 }
632
633 void
634 thread_deallocated_cleanup(tsd_t *tsd)
635 {
636
637 /* Do nothing. */
638 }
639
640 void
641 arena_cleanup(tsd_t *tsd)
642 {
643 arena_t *arena;
644
645 arena = tsd_arena_get(tsd);
646 if (arena != NULL)
647 arena_unbind(tsd, arena->ind);
648 }
649
650 void
651 arenas_tdata_cleanup(tsd_t *tsd)
652 {
653 arena_tdata_t *arenas_tdata;
654
655 /* Prevent tsd->arenas_tdata from being (re)created. */
656 *tsd_arenas_tdata_bypassp_get(tsd) = true;
657
658 arenas_tdata = tsd_arenas_tdata_get(tsd);
659 if (arenas_tdata != NULL) {
660 tsd_arenas_tdata_set(tsd, NULL);
661 a0dalloc(arenas_tdata);
662 }
663 }
664
665 void
666 narenas_tdata_cleanup(tsd_t *tsd)
667 {
668
669 /* Do nothing. */
670 }
671
672 void
673 arenas_tdata_bypass_cleanup(tsd_t *tsd)
674 {
675
676 /* Do nothing. */
677 }
678
679 static void
680 stats_print_atexit(void)
681 {
682
683 if (config_tcache && config_stats) {
684 unsigned narenas, i;
685
686 /*
687 * Merge stats from extant threads. This is racy, since
688 * individual threads do not lock when recording tcache stats
689 * events. As a consequence, the final stats may be slightly
690 * out of date by the time they are reported, if other threads
691 * continue to allocate.
692 */
693 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
694 arena_t *arena = arena_get(i, false);
695 if (arena != NULL) {
696 tcache_t *tcache;
697
698 /*
699 * tcache_stats_merge() locks bins, so if any
700 * code is introduced that acquires both arena
701 * and bin locks in the opposite order,
702 * deadlocks may result.
703 */
704 malloc_mutex_lock(&arena->lock);
705 ql_foreach(tcache, &arena->tcache_ql, link) {
706 tcache_stats_merge(tcache, arena);
707 }
708 malloc_mutex_unlock(&arena->lock);
709 }
710 }
711 }
712 je_malloc_stats_print(NULL, NULL, NULL);
713 }
714
715 /*
716 * End miscellaneous support functions.
717 */
718 /******************************************************************************/
719 /*
720 * Begin initialization functions.
721 */
722
723 #ifndef JEMALLOC_HAVE_SECURE_GETENV
724 static char *
725 secure_getenv(const char *name)
726 {
727
728 # ifdef JEMALLOC_HAVE_ISSETUGID
729 if (issetugid() != 0)
730 return (NULL);
731 # endif
732 return (getenv(name));
733 }
734 #endif
735
736 static unsigned
737 malloc_ncpus(void)
738 {
739 long result;
740
741 #ifdef _WIN32
742 SYSTEM_INFO si;
743 GetSystemInfo(&si);
744 result = si.dwNumberOfProcessors;
745 #else
746 result = sysconf(_SC_NPROCESSORS_ONLN);
747 #endif
748 return ((result == -1) ? 1 : (unsigned)result);
749 }
750
751 static bool
752 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
753 char const **v_p, size_t *vlen_p)
754 {
755 bool accept;
756 const char *opts = *opts_p;
757
758 *k_p = opts;
759
760 for (accept = false; !accept;) {
761 switch (*opts) {
762 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
763 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
764 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
765 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
766 case 'Y': case 'Z':
767 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
768 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
769 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
770 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
771 case 'y': case 'z':
772 case '0': case '1': case '2': case '3': case '4': case '5':
773 case '6': case '7': case '8': case '9':
774 case '_':
775 opts++;
776 break;
777 case ':':
778 opts++;
779 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
780 *v_p = opts;
781 accept = true;
782 break;
783 case '\0':
784 if (opts != *opts_p) {
785 malloc_write("<jemalloc>: Conf string ends "
786 "with key\n");
787 }
788 return (true);
789 default:
790 malloc_write("<jemalloc>: Malformed conf string\n");
791 return (true);
792 }
793 }
794
795 for (accept = false; !accept;) {
796 switch (*opts) {
797 case ',':
798 opts++;
799 /*
800 * Look ahead one character here, because the next time
801 * this function is called, it will assume that end of
802 * input has been cleanly reached if no input remains,
803 * but we have optimistically already consumed the
804 * comma if one exists.
805 */
806 if (*opts == '\0') {
807 malloc_write("<jemalloc>: Conf string ends "
808 "with comma\n");
809 }
810 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
811 accept = true;
812 break;
813 case '\0':
814 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
815 accept = true;
816 break;
817 default:
818 opts++;
819 break;
820 }
821 }
822
823 *opts_p = opts;
824 return (false);
825 }
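/*
 * Example: with opts = "narenas:8,lg_dirty_mult:3", the first call yields
 * k = "narenas" (klen 7), v = "8" (vlen 1) and advances *opts_p past the
 * comma; the second call yields k = "lg_dirty_mult", v = "3" and leaves
 * *opts_p at the terminating '\0'.
 */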
826
827 static void
828 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
829 size_t vlen)
830 {
831
832 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
833 (int)vlen, v);
834 }
835
836 static void
837 malloc_slow_flag_init(void)
838 {
839 /*
840 * Combine the runtime options into malloc_slow for fast path. Called
841 * after processing all the options.
842 */
843 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
844 | (opt_junk_free ? flag_opt_junk_free : 0)
845 | (opt_quarantine ? flag_opt_quarantine : 0)
846 | (opt_zero ? flag_opt_zero : 0)
847 | (opt_utrace ? flag_opt_utrace : 0)
848 | (opt_xmalloc ? flag_opt_xmalloc : 0);
849
850 if (config_valgrind)
851 malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
852
853 malloc_slow = (malloc_slow_flags != 0);
854 }
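/*
 * je_malloc(), je_realloc(), and je_free() branch on malloc_slow, so
 * enabling any of the options above (junk, quarantine, zero, utrace,
 * xmalloc, or running under Valgrind) routes those entry points through
 * their slow paths for the life of the process.
 */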
855
856 static void
857 malloc_conf_init(void)
858 {
859 unsigned i;
860 char buf[PATH_MAX + 1];
861 const char *opts, *k, *v;
862 size_t klen, vlen;
863
864 /*
865 * Automatically configure valgrind before processing options. The
866 * valgrind option remains in jemalloc 3.x for compatibility reasons.
867 */
868 if (config_valgrind) {
869 in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
870 if (config_fill && unlikely(in_valgrind)) {
871 opt_junk = "false";
872 opt_junk_alloc = false;
873 opt_junk_free = false;
874 assert(!opt_zero);
875 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
876 opt_redzone = true;
877 }
878 if (config_tcache && unlikely(in_valgrind))
879 opt_tcache = false;
880 }
881
882 #if defined(__ANDROID__)
883 for (i = 0; i < 2; i++) {
884 #else
885 for (i = 0; i < 4; i++) {
886 #endif
887 /* Get runtime configuration. */
888 switch (i) {
889 case 0:
890 opts = config_malloc_conf;
891 break;
892 case 1:
893 if (je_malloc_conf != NULL) {
894 /*
895 * Use options that were compiled into the
896 * program.
897 */
898 opts = je_malloc_conf;
899 } else {
900 /* No configuration specified. */
901 buf[0] = '\0';
902 opts = buf;
903 }
904 break;
905 case 2: {
906 ssize_t linklen = 0;
907 #ifndef _WIN32
908 int saved_errno = errno;
909 const char *linkname =
910 # ifdef JEMALLOC_PREFIX
911 "/etc/"JEMALLOC_PREFIX"malloc.conf"
912 # else
913 "/etc/malloc.conf"
914 # endif
915 ;
916
917 /*
918 * Try to use the contents of the "/etc/malloc.conf"
919 * symbolic link's name.
920 */
921 linklen = readlink(linkname, buf, sizeof(buf) - 1);
922 if (linklen == -1) {
923 /* No configuration specified. */
924 linklen = 0;
925 /* Restore errno. */
926 set_errno(saved_errno);
927 }
928 #endif
929 buf[linklen] = '\0';
930 opts = buf;
931 break;
932 } case 3: {
933 const char *envname =
934 #ifdef JEMALLOC_PREFIX
935 JEMALLOC_CPREFIX"MALLOC_CONF"
936 #else
937 "MALLOC_CONF"
938 #endif
939 ;
940
941 if ((opts = secure_getenv(envname)) != NULL) {
942 /*
943 * Do nothing; opts is already initialized to
944 * the value of the MALLOC_CONF environment
945 * variable.
946 */
947 } else {
948 /* No configuration specified. */
949 buf[0] = '\0';
950 opts = buf;
951 }
952 break;
953 } default:
954 not_reached();
955 buf[0] = '\0';
956 opts = buf;
957 }
958
959 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
960 &vlen)) {
961 #define CONF_MATCH(n) \
962 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
963 #define CONF_MATCH_VALUE(n) \
964 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
965 #define CONF_HANDLE_BOOL(o, n, cont) \
966 if (CONF_MATCH(n)) { \
967 if (CONF_MATCH_VALUE("true")) \
968 o = true; \
969 else if (CONF_MATCH_VALUE("false")) \
970 o = false; \
971 else { \
972 malloc_conf_error( \
973 "Invalid conf value", \
974 k, klen, v, vlen); \
975 } \
976 if (cont) \
977 continue; \
978 }
979 #define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
980 if (CONF_MATCH(n)) { \
981 uintmax_t um; \
982 char *end; \
983 \
984 set_errno(0); \
985 um = malloc_strtoumax(v, &end, 0); \
986 if (get_errno() != 0 || (uintptr_t)end -\
987 (uintptr_t)v != vlen) { \
988 malloc_conf_error( \
989 "Invalid conf value", \
990 k, klen, v, vlen); \
991 } else if (clip) { \
992 if ((min) != 0 && um < (min)) \
993 o = (t)(min); \
994 else if (um > (max)) \
995 o = (t)(max); \
996 else \
997 o = (t)um; \
998 } else { \
999 if (((min) != 0 && um < (min)) \
1000 || um > (max)) { \
1001 malloc_conf_error( \
1002 "Out-of-range " \
1003 "conf value", \
1004 k, klen, v, vlen); \
1005 } else \
1006 o = (t)um; \
1007 } \
1008 continue; \
1009 }
1010 #define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
1011 CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
1012 #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
1013 CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
1014 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1015 if (CONF_MATCH(n)) { \
1016 long l; \
1017 char *end; \
1018 \
1019 set_errno(0); \
1020 l = strtol(v, &end, 0); \
1021 if (get_errno() != 0 || (uintptr_t)end -\
1022 (uintptr_t)v != vlen) { \
1023 malloc_conf_error( \
1024 "Invalid conf value", \
1025 k, klen, v, vlen); \
1026 } else if (l < (ssize_t)(min) || l > \
1027 (ssize_t)(max)) { \
1028 malloc_conf_error( \
1029 "Out-of-range conf value", \
1030 k, klen, v, vlen); \
1031 } else \
1032 o = l; \
1033 continue; \
1034 }
1035 #define CONF_HANDLE_CHAR_P(o, n, d) \
1036 if (CONF_MATCH(n)) { \
1037 size_t cpylen = (vlen <= \
1038 sizeof(o)-1) ? vlen : \
1039 sizeof(o)-1; \
1040 strncpy(o, v, cpylen); \
1041 o[cpylen] = '\0'; \
1042 continue; \
1043 }
1044
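/*
 * For example, the pair "abort:true" is handled by
 * CONF_HANDLE_BOOL(opt_abort, "abort", true): the key is matched by length
 * and content, the value must be exactly "true" or "false", and anything
 * else is reported through malloc_conf_error() before moving on to the
 * next option pair.
 */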
1045 CONF_HANDLE_BOOL(opt_abort, "abort", true)
1046 /*
1047 * Chunks always require at least one header page,
1048 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1049 * possibly an additional page in the presence of
1050 * redzones. In order to simplify options processing,
1051 * use a conservative bound that accommodates all these
1052 * constraints.
1053 */
1054 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1055 LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1056 (sizeof(size_t) << 3) - 1, true)
1057 if (strncmp("dss", k, klen) == 0) {
1058 int i;
1059 bool match = false;
1060 for (i = 0; i < dss_prec_limit; i++) {
1061 if (strncmp(dss_prec_names[i], v, vlen)
1062 == 0) {
1063 if (chunk_dss_prec_set(i)) {
1064 malloc_conf_error(
1065 "Error setting dss",
1066 k, klen, v, vlen);
1067 } else {
1068 opt_dss =
1069 dss_prec_names[i];
1070 match = true;
1071 break;
1072 }
1073 }
1074 }
1075 if (!match) {
1076 malloc_conf_error("Invalid conf value",
1077 k, klen, v, vlen);
1078 }
1079 continue;
1080 }
1081 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1082 UINT_MAX, false)
1083 if (strncmp("purge", k, klen) == 0) {
1084 int i;
1085 bool match = false;
1086 for (i = 0; i < purge_mode_limit; i++) {
1087 if (strncmp(purge_mode_names[i], v,
1088 vlen) == 0) {
1089 opt_purge = (purge_mode_t)i;
1090 match = true;
1091 break;
1092 }
1093 }
1094 if (!match) {
1095 malloc_conf_error("Invalid conf value",
1096 k, klen, v, vlen);
1097 }
1098 continue;
1099 }
1100 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1101 -1, (sizeof(size_t) << 3) - 1)
1102 CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
1103 NSTIME_SEC_MAX);
1104 CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1105 if (config_fill) {
1106 if (CONF_MATCH("junk")) {
1107 if (CONF_MATCH_VALUE("true")) {
1108 opt_junk = "true";
1109 opt_junk_alloc = opt_junk_free =
1110 true;
1111 } else if (CONF_MATCH_VALUE("false")) {
1112 opt_junk = "false";
1113 opt_junk_alloc = opt_junk_free =
1114 false;
1115 } else if (CONF_MATCH_VALUE("alloc")) {
1116 opt_junk = "alloc";
1117 opt_junk_alloc = true;
1118 opt_junk_free = false;
1119 } else if (CONF_MATCH_VALUE("free")) {
1120 opt_junk = "free";
1121 opt_junk_alloc = false;
1122 opt_junk_free = true;
1123 } else {
1124 malloc_conf_error(
1125 "Invalid conf value", k,
1126 klen, v, vlen);
1127 }
1128 continue;
1129 }
1130 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1131 0, SIZE_T_MAX, false)
1132 CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1133 CONF_HANDLE_BOOL(opt_zero, "zero", true)
1134 }
1135 if (config_utrace) {
1136 CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1137 }
1138 if (config_xmalloc) {
1139 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1140 }
1141 if (config_tcache) {
1142 CONF_HANDLE_BOOL(opt_tcache, "tcache",
1143 !config_valgrind || !in_valgrind)
1144 if (CONF_MATCH("tcache")) {
1145 assert(config_valgrind && in_valgrind);
1146 if (opt_tcache) {
1147 opt_tcache = false;
1148 malloc_conf_error(
1149 "tcache cannot be enabled "
1150 "while running inside Valgrind",
1151 k, klen, v, vlen);
1152 }
1153 continue;
1154 }
1155 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1156 "lg_tcache_max", -1,
1157 (sizeof(size_t) << 3) - 1)
1158 }
1159 if (config_prof) {
1160 CONF_HANDLE_BOOL(opt_prof, "prof", true)
1161 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1162 "prof_prefix", "jeprof")
1163 CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1164 true)
1165 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1166 "prof_thread_active_init", true)
1167 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1168 "lg_prof_sample", 0,
1169 (sizeof(uint64_t) << 3) - 1, true)
1170 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1171 true)
1172 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1173 "lg_prof_interval", -1,
1174 (sizeof(uint64_t) << 3) - 1)
1175 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1176 true)
1177 CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1178 true)
1179 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1180 true)
1181 }
1182 malloc_conf_error("Invalid conf pair", k, klen, v,
1183 vlen);
1184 #undef CONF_MATCH
1185 #undef CONF_HANDLE_BOOL
1186 #undef CONF_HANDLE_SIZE_T
1187 #undef CONF_HANDLE_SSIZE_T
1188 #undef CONF_HANDLE_CHAR_P
1189 }
1190 }
1191 }
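/*
 * The loop above gives the configuration sources increasing precedence:
 * the compiled-in config_malloc_conf string is processed first, then the
 * je_malloc_conf global, then (except on Android builds, which stop after
 * the first two sources) the /etc/malloc.conf symlink target, and finally
 * the MALLOC_CONF environment variable, so later sources override earlier
 * ones.
 */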
1192
1193 /* init_lock must be held. */
1194 static bool
1195 malloc_init_hard_needed(void)
1196 {
1197
1198 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1199 malloc_init_recursible)) {
1200 /*
1201 * Another thread initialized the allocator before this one
1202 * acquired init_lock, or this thread is the initializing
1203 * thread, and it is recursively allocating.
1204 */
1205 return (false);
1206 }
1207 #ifdef JEMALLOC_THREADED_INIT
1208 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1209 /* Busy-wait until the initializing thread completes. */
1210 do {
1211 malloc_mutex_unlock(&init_lock);
1212 CPU_SPINWAIT;
1213 malloc_mutex_lock(&init_lock);
1214 } while (!malloc_initialized());
1215 return (false);
1216 }
1217 #endif
1218 return (true);
1219 }
1220
1221 /* init_lock must be held. */
1222 static bool
1223 malloc_init_hard_a0_locked(void)
1224 {
1225
1226 malloc_initializer = INITIALIZER;
1227
1228 if (config_prof)
1229 prof_boot0();
1230 malloc_conf_init();
1231 if (opt_stats_print) {
1232 /* Print statistics at exit. */
1233 if (atexit(stats_print_atexit) != 0) {
1234 malloc_write("<jemalloc>: Error in atexit()\n");
1235 if (opt_abort)
1236 abort();
1237 }
1238 }
1239 if (base_boot())
1240 return (true);
1241 if (chunk_boot())
1242 return (true);
1243 if (ctl_boot())
1244 return (true);
1245 if (config_prof)
1246 prof_boot1();
1247 if (arena_boot())
1248 return (true);
1249 if (config_tcache && tcache_boot())
1250 return (true);
1251 if (malloc_mutex_init(&arenas_lock))
1252 return (true);
1253 /*
1254 * Create enough scaffolding to allow recursive allocation in
1255 * malloc_ncpus().
1256 */
1257 narenas_auto = 1;
1258 narenas_total_set(narenas_auto);
1259 arenas = &a0;
1260 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1261 /*
1262 * Initialize one arena here. The rest are lazily created in
1263 * arena_choose_hard().
1264 */
1265 if (arena_init(0) == NULL)
1266 return (true);
1267 malloc_init_state = malloc_init_a0_initialized;
1268 return (false);
1269 }
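/*
 * At this point only arena 0 exists, and the arenas "array" is literally
 * &a0; the full MALLOCX_ARENA_MAX+1 element array is allocated later in
 * malloc_init_hard_finish(), which copies the a0 pointer into slot 0.
 */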
1270
1271 static bool
1272 malloc_init_hard_a0(void)
1273 {
1274 bool ret;
1275
1276 malloc_mutex_lock(&init_lock);
1277 ret = malloc_init_hard_a0_locked();
1278 malloc_mutex_unlock(&init_lock);
1279 return (ret);
1280 }
1281
1282 /*
1283 * Initialize data structures which may trigger recursive allocation.
1284 *
1285 * init_lock must be held.
1286 */
1287 static bool
1288 malloc_init_hard_recursible(void)
1289 {
1290 bool ret = false;
1291
1292 malloc_init_state = malloc_init_recursible;
1293 malloc_mutex_unlock(&init_lock);
1294
1295 /* LinuxThreads' pthread_setspecific() allocates. */
1296 if (malloc_tsd_boot0()) {
1297 ret = true;
1298 goto label_return;
1299 }
1300
1301 ncpus = malloc_ncpus();
1302
1303 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1304 && !defined(_WIN32) && !defined(__native_client__))
1305 /* LinuxThreads' pthread_atfork() allocates. */
1306 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1307 jemalloc_postfork_child) != 0) {
1308 ret = true;
1309 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1310 if (opt_abort)
1311 abort();
1312 }
1313 #endif
1314
1315 label_return:
1316 malloc_mutex_lock(&init_lock);
1317 return (ret);
1318 }
1319
1320 /* init_lock must be held. */
1321 static bool
1322 malloc_init_hard_finish(void)
1323 {
1324
1325 if (mutex_boot())
1326 return (true);
1327
1328 if (opt_narenas == 0) {
1329 /*
1330 * For SMP systems, create more than one arena per CPU by
1331 * default.
1332 */
1333 if (ncpus > 1)
1334 opt_narenas = ncpus << 2;
1335 else
1336 opt_narenas = 1;
1337 }
1338 #if defined(ANDROID_MAX_ARENAS)
1339 /* Never create more than ANDROID_MAX_ARENAS arenas regardless of ncpus.
1340 	 * Extra arenas use more PSS and are not very useful unless
1341 	 * lots of threads are allocating/freeing at the same time.
1342 	 */
1343 if (opt_narenas > ANDROID_MAX_ARENAS)
1344 opt_narenas = ANDROID_MAX_ARENAS;
1345 #endif
1346 narenas_auto = opt_narenas;
1347 /*
1348 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1349 */
1350 if (narenas_auto > MALLOCX_ARENA_MAX) {
1351 narenas_auto = MALLOCX_ARENA_MAX;
1352 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1353 narenas_auto);
1354 }
1355 narenas_total_set(narenas_auto);
1356
1357 /* Allocate and initialize arenas. */
1358 arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
1359 (MALLOCX_ARENA_MAX+1));
1360 if (arenas == NULL)
1361 return (true);
1362 /* Copy the pointer to the one arena that was already initialized. */
1363 arena_set(0, a0);
1364
1365 malloc_init_state = malloc_init_initialized;
1366 malloc_slow_flag_init();
1367
1368 return (false);
1369 }
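/*
 * Example sizing: on an 8-CPU system with no explicit "narenas" option,
 * opt_narenas defaults to 32 (ncpus << 2), and is then clamped by
 * ANDROID_MAX_ARENAS (when defined) and by MALLOCX_ARENA_MAX.
 */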
1370
1371 static bool
1372 malloc_init_hard(void)
1373 {
1374
1375 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1376 _init_init_lock();
1377 #endif
1378 malloc_mutex_lock(&init_lock);
1379 if (!malloc_init_hard_needed()) {
1380 malloc_mutex_unlock(&init_lock);
1381 return (false);
1382 }
1383
1384 if (malloc_init_state != malloc_init_a0_initialized &&
1385 malloc_init_hard_a0_locked()) {
1386 malloc_mutex_unlock(&init_lock);
1387 return (true);
1388 }
1389
1390 if (malloc_init_hard_recursible()) {
1391 malloc_mutex_unlock(&init_lock);
1392 return (true);
1393 }
1394
1395 if (config_prof && prof_boot2()) {
1396 malloc_mutex_unlock(&init_lock);
1397 return (true);
1398 }
1399
1400 if (malloc_init_hard_finish()) {
1401 malloc_mutex_unlock(&init_lock);
1402 return (true);
1403 }
1404
1405 malloc_mutex_unlock(&init_lock);
1406 malloc_tsd_boot1();
1407 return (false);
1408 }
1409
1410 /*
1411 * End initialization functions.
1412 */
1413 /******************************************************************************/
1414 /*
1415 * Begin malloc(3)-compatible functions.
1416 */
1417
1418 static void *
1419 imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
1420 prof_tctx_t *tctx, bool slow_path)
1421 {
1422 void *p;
1423
1424 if (tctx == NULL)
1425 return (NULL);
1426 if (usize <= SMALL_MAXCLASS) {
1427 szind_t ind_large = size2index(LARGE_MINCLASS);
1428 p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
1429 if (p == NULL)
1430 return (NULL);
1431 arena_prof_promoted(p, usize);
1432 } else
1433 p = imalloc(tsd, usize, ind, slow_path);
1434
1435 return (p);
1436 }
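/*
 * Sampled allocations that would otherwise be small are promoted to
 * LARGE_MINCLASS so that profiling metadata can be tracked per allocation;
 * arena_prof_promoted() then records the originally requested usable size.
 * The other *_prof_sample() helpers below follow the same pattern.
 */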
1437
1438 JEMALLOC_ALWAYS_INLINE_C void *
1439 imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
1440 {
1441 void *p;
1442 prof_tctx_t *tctx;
1443
1444 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1445 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1446 p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
1447 else
1448 p = imalloc(tsd, usize, ind, slow_path);
1449 if (unlikely(p == NULL)) {
1450 prof_alloc_rollback(tsd, tctx, true);
1451 return (NULL);
1452 }
1453 prof_malloc(p, usize, tctx);
1454
1455 return (p);
1456 }
1457
1458 JEMALLOC_ALWAYS_INLINE_C void *
1459 imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
1460 {
1461 szind_t ind;
1462
1463 if (slow_path && unlikely(malloc_init()))
1464 return (NULL);
1465 *tsd = tsd_fetch();
1466 ind = size2index(size);
1467 if (unlikely(ind >= NSIZES))
1468 return (NULL);
1469
1470 if (config_stats || (config_prof && opt_prof) || (slow_path &&
1471 config_valgrind && unlikely(in_valgrind))) {
1472 *usize = index2size(ind);
1473 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
1474 }
1475
1476 if (config_prof && opt_prof)
1477 return (imalloc_prof(*tsd, *usize, ind, slow_path));
1478
1479 return (imalloc(*tsd, size, ind, slow_path));
1480 }
1481
1482 JEMALLOC_ALWAYS_INLINE_C void
1483 imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
1484 {
1485 if (unlikely(ret == NULL)) {
1486 if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
1487 malloc_write("<jemalloc>: Error in malloc(): "
1488 "out of memory\n");
1489 abort();
1490 }
1491 set_errno(ENOMEM);
1492 }
1493 if (config_stats && likely(ret != NULL)) {
1494 assert(usize == isalloc(ret, config_prof));
1495 *tsd_thread_allocatedp_get(tsd) += usize;
1496 }
1497 }
1498
1499 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1500 void JEMALLOC_NOTHROW *
1501 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1502 je_malloc(size_t size)
1503 {
1504 void *ret;
1505 tsd_t *tsd;
1506 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1507
1508 if (size == 0)
1509 size = 1;
1510
1511 if (likely(!malloc_slow)) {
1512 /*
1513 * imalloc_body() is inlined so that fast and slow paths are
1514 * generated separately with statically known slow_path.
1515 */
1516 ret = imalloc_body(size, &tsd, &usize, false);
1517 imalloc_post_check(ret, tsd, usize, false);
1518 } else {
1519 ret = imalloc_body(size, &tsd, &usize, true);
1520 imalloc_post_check(ret, tsd, usize, true);
1521 UTRACE(0, size, ret);
1522 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
1523 }
1524
1525 return (ret);
1526 }
1527
1528 static void *
1529 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1530 prof_tctx_t *tctx)
1531 {
1532 void *p;
1533
1534 if (tctx == NULL)
1535 return (NULL);
1536 if (usize <= SMALL_MAXCLASS) {
1537 assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1538 p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1539 if (p == NULL)
1540 return (NULL);
1541 arena_prof_promoted(p, usize);
1542 } else
1543 p = ipalloc(tsd, usize, alignment, false);
1544
1545 return (p);
1546 }
1547
1548 JEMALLOC_ALWAYS_INLINE_C void *
1549 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1550 {
1551 void *p;
1552 prof_tctx_t *tctx;
1553
1554 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1555 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1556 p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1557 else
1558 p = ipalloc(tsd, usize, alignment, false);
1559 if (unlikely(p == NULL)) {
1560 prof_alloc_rollback(tsd, tctx, true);
1561 return (NULL);
1562 }
1563 prof_malloc(p, usize, tctx);
1564
1565 return (p);
1566 }
1567
1568 JEMALLOC_ATTR(nonnull(1))
1569 static int
1570 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1571 {
1572 int ret;
1573 tsd_t *tsd;
1574 size_t usize;
1575 void *result;
1576
1577 assert(min_alignment != 0);
1578
1579 if (unlikely(malloc_init())) {
1580 result = NULL;
1581 goto label_oom;
1582 }
1583 tsd = tsd_fetch();
1584 if (size == 0)
1585 size = 1;
1586
1587 /* Make sure that alignment is a large enough power of 2. */
1588 if (unlikely(((alignment - 1) & alignment) != 0
1589 || (alignment < min_alignment))) {
1590 if (config_xmalloc && unlikely(opt_xmalloc)) {
1591 malloc_write("<jemalloc>: Error allocating "
1592 "aligned memory: invalid alignment\n");
1593 abort();
1594 }
1595 result = NULL;
1596 ret = EINVAL;
1597 goto label_return;
1598 }
1599
1600 usize = sa2u(size, alignment);
1601 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
1602 result = NULL;
1603 goto label_oom;
1604 }
1605
1606 if (config_prof && opt_prof)
1607 result = imemalign_prof(tsd, alignment, usize);
1608 else
1609 result = ipalloc(tsd, usize, alignment, false);
1610 if (unlikely(result == NULL))
1611 goto label_oom;
1612 assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1613
1614 *memptr = result;
1615 ret = 0;
1616 label_return:
1617 if (config_stats && likely(result != NULL)) {
1618 assert(usize == isalloc(result, config_prof));
1619 *tsd_thread_allocatedp_get(tsd) += usize;
1620 }
1621 UTRACE(0, size, result);
1622 return (ret);
1623 label_oom:
1624 assert(result == NULL);
1625 if (config_xmalloc && unlikely(opt_xmalloc)) {
1626 malloc_write("<jemalloc>: Error allocating aligned memory: "
1627 "out of memory\n");
1628 abort();
1629 }
1630 ret = ENOMEM;
1631 goto label_return;
1632 }
1633
1634 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1635 JEMALLOC_ATTR(nonnull(1))
1636 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1637 {
1638 int ret = imemalign(memptr, alignment, size, sizeof(void *));
1639 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1640 config_prof), false);
1641 return (ret);
1642 }
1643
1644 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1645 void JEMALLOC_NOTHROW *
1646 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1647 je_aligned_alloc(size_t alignment, size_t size)
1648 {
1649 void *ret;
1650 int err;
1651
1652 if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1653 ret = NULL;
1654 set_errno(err);
1655 }
1656 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1657 false);
1658 return (ret);
1659 }
1660
1661 static void *
1662 icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
1663 {
1664 void *p;
1665
1666 if (tctx == NULL)
1667 return (NULL);
1668 if (usize <= SMALL_MAXCLASS) {
1669 szind_t ind_large = size2index(LARGE_MINCLASS);
1670 p = icalloc(tsd, LARGE_MINCLASS, ind_large);
1671 if (p == NULL)
1672 return (NULL);
1673 arena_prof_promoted(p, usize);
1674 } else
1675 p = icalloc(tsd, usize, ind);
1676
1677 return (p);
1678 }
1679
1680 JEMALLOC_ALWAYS_INLINE_C void *
1681 icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
1682 {
1683 void *p;
1684 prof_tctx_t *tctx;
1685
1686 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1687 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1688 p = icalloc_prof_sample(tsd, usize, ind, tctx);
1689 else
1690 p = icalloc(tsd, usize, ind);
1691 if (unlikely(p == NULL)) {
1692 prof_alloc_rollback(tsd, tctx, true);
1693 return (NULL);
1694 }
1695 prof_malloc(p, usize, tctx);
1696
1697 return (p);
1698 }
1699
1700 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1701 void JEMALLOC_NOTHROW *
1702 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1703 je_calloc(size_t num, size_t size)
1704 {
1705 void *ret;
1706 tsd_t *tsd;
1707 size_t num_size;
1708 szind_t ind;
1709 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1710
1711 if (unlikely(malloc_init())) {
1712 num_size = 0;
1713 ret = NULL;
1714 goto label_return;
1715 }
1716 tsd = tsd_fetch();
1717
1718 num_size = num * size;
1719 if (unlikely(num_size == 0)) {
1720 if (num == 0 || size == 0)
1721 num_size = 1;
1722 else {
1723 ret = NULL;
1724 goto label_return;
1725 }
1726 /*
1727 * Try to avoid division here. We know that it isn't possible to
1728 * overflow during multiplication if neither operand uses any of the
1729 * most significant half of the bits in a size_t.
1730 */
1731 } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1732 2))) && (num_size / size != num))) {
1733 /* size_t overflow. */
1734 ret = NULL;
1735 goto label_return;
1736 }
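	/*
	 * The mask above covers the high half of size_t (the upper 32 bits
	 * when size_t is 64 bits wide), so the division runs only when an
	 * overflow is actually possible.  For example, num = 0x100000001 and
	 * size = 0x100000000 wrap around to num_size = 0x100000000, and
	 * num_size / size != num then catches the overflow.
	 */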
1737
1738 ind = size2index(num_size);
1739 if (unlikely(ind >= NSIZES)) {
1740 ret = NULL;
1741 goto label_return;
1742 }
1743 if (config_prof && opt_prof) {
1744 usize = index2size(ind);
1745 ret = icalloc_prof(tsd, usize, ind);
1746 } else {
1747 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1748 usize = index2size(ind);
1749 ret = icalloc(tsd, num_size, ind);
1750 }
1751
1752 label_return:
1753 if (unlikely(ret == NULL)) {
1754 if (config_xmalloc && unlikely(opt_xmalloc)) {
1755 malloc_write("<jemalloc>: Error in calloc(): out of "
1756 "memory\n");
1757 abort();
1758 }
1759 set_errno(ENOMEM);
1760 }
1761 if (config_stats && likely(ret != NULL)) {
1762 assert(usize == isalloc(ret, config_prof));
1763 *tsd_thread_allocatedp_get(tsd) += usize;
1764 }
1765 UTRACE(0, num_size, ret);
1766 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1767 return (ret);
1768 }
1769
1770 static void *
1771 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
1772 prof_tctx_t *tctx)
1773 {
1774 void *p;
1775
1776 if (tctx == NULL)
1777 return (NULL);
1778 if (usize <= SMALL_MAXCLASS) {
1779 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
1780 if (p == NULL)
1781 return (NULL);
1782 arena_prof_promoted(p, usize);
1783 } else
1784 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1785
1786 return (p);
1787 }
1788
1789 JEMALLOC_ALWAYS_INLINE_C void *
1790 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
1791 {
1792 void *p;
1793 bool prof_active;
1794 prof_tctx_t *old_tctx, *tctx;
1795
1796 prof_active = prof_active_get_unlocked();
1797 old_tctx = prof_tctx_get(old_ptr);
1798 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
1799 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1800 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
1801 else
1802 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1803 if (unlikely(p == NULL)) {
1804 prof_alloc_rollback(tsd, tctx, true);
1805 return (NULL);
1806 }
1807 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
1808 old_tctx);
1809
1810 return (p);
1811 }
1812
1813 JEMALLOC_INLINE_C void
1814 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
1815 {
1816 size_t usize;
1817 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1818
1819 assert(ptr != NULL);
1820 assert(malloc_initialized() || IS_INITIALIZER);
1821
1822 if (config_prof && opt_prof) {
1823 usize = isalloc(ptr, config_prof);
1824 prof_free(tsd, ptr, usize);
1825 } else if (config_stats || config_valgrind)
1826 usize = isalloc(ptr, config_prof);
1827 if (config_stats)
1828 *tsd_thread_deallocatedp_get(tsd) += usize;
1829
1830 if (likely(!slow_path))
1831 iqalloc(tsd, ptr, tcache, false);
1832 else {
1833 if (config_valgrind && unlikely(in_valgrind))
1834 rzsize = p2rz(ptr);
1835 iqalloc(tsd, ptr, tcache, true);
1836 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1837 }
1838 }
1839
1840 JEMALLOC_INLINE_C void
1841 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1842 {
1843 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1844
1845 assert(ptr != NULL);
1846 assert(malloc_initialized() || IS_INITIALIZER);
1847
1848 if (config_prof && opt_prof)
1849 prof_free(tsd, ptr, usize);
1850 if (config_stats)
1851 *tsd_thread_deallocatedp_get(tsd) += usize;
1852 if (config_valgrind && unlikely(in_valgrind))
1853 rzsize = p2rz(ptr);
1854 isqalloc(tsd, ptr, usize, tcache);
1855 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1856 }
1857
1858 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1859 void JEMALLOC_NOTHROW *
1860 JEMALLOC_ALLOC_SIZE(2)
1861 je_realloc(void *ptr, size_t size)
1862 {
1863 void *ret;
1864 tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1865 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1866 size_t old_usize = 0;
1867 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1868
1869 if (unlikely(size == 0)) {
1870 if (ptr != NULL) {
1871 /* realloc(ptr, 0) is equivalent to free(ptr). */
1872 UTRACE(ptr, 0, 0);
1873 tsd = tsd_fetch();
1874 ifree(tsd, ptr, tcache_get(tsd, false), true);
1875 return (NULL);
1876 }
1877 size = 1;
1878 }
1879
1880 if (likely(ptr != NULL)) {
1881 assert(malloc_initialized() || IS_INITIALIZER);
1882 malloc_thread_init();
1883 tsd = tsd_fetch();
1884
1885 old_usize = isalloc(ptr, config_prof);
1886 if (config_valgrind && unlikely(in_valgrind))
1887 old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
1888
1889 if (config_prof && opt_prof) {
1890 usize = s2u(size);
1891 ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
1892 NULL : irealloc_prof(tsd, ptr, old_usize, usize);
1893 } else {
1894 if (config_stats || (config_valgrind &&
1895 unlikely(in_valgrind)))
1896 usize = s2u(size);
1897 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1898 }
1899 } else {
1900 /* realloc(NULL, size) is equivalent to malloc(size). */
1901 if (likely(!malloc_slow))
1902 ret = imalloc_body(size, &tsd, &usize, false);
1903 else
1904 ret = imalloc_body(size, &tsd, &usize, true);
1905 }
1906
1907 if (unlikely(ret == NULL)) {
1908 if (config_xmalloc && unlikely(opt_xmalloc)) {
1909 malloc_write("<jemalloc>: Error in realloc(): "
1910 "out of memory\n");
1911 abort();
1912 }
1913 set_errno(ENOMEM);
1914 }
1915 if (config_stats && likely(ret != NULL)) {
1916 assert(usize == isalloc(ret, config_prof));
1917 *tsd_thread_allocatedp_get(tsd) += usize;
1918 *tsd_thread_deallocatedp_get(tsd) += old_usize;
1919 }
1920 UTRACE(ptr, size, ret);
1921 JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1922 old_rzsize, true, false);
1923 return (ret);
1924 }
1925
1926 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1927 je_free(void *ptr)
1928 {
1929
1930 UTRACE(ptr, 0, 0);
1931 if (likely(ptr != NULL)) {
1932 tsd_t *tsd = tsd_fetch();
1933 if (likely(!malloc_slow))
1934 ifree(tsd, ptr, tcache_get(tsd, false), false);
1935 else
1936 ifree(tsd, ptr, tcache_get(tsd, false), true);
1937 }
1938 }
1939
1940 /*
1941 * End malloc(3)-compatible functions.
1942 */
1943 /******************************************************************************/
1944 /*
1945 * Begin non-standard override functions.
1946 */
1947
1948 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1949 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1950 void JEMALLOC_NOTHROW *
1951 JEMALLOC_ATTR(malloc)
1952 je_memalign(size_t alignment, size_t size)
1953 {
1954 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1955 if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1956 ret = NULL;
1957 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1958 return (ret);
1959 }
1960 #endif
1961
1962 #ifdef JEMALLOC_OVERRIDE_VALLOC
1963 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1964 void JEMALLOC_NOTHROW *
1965 JEMALLOC_ATTR(malloc)
1966 je_valloc(size_t size)
1967 {
1968 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1969 if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1970 ret = NULL;
1971 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1972 return (ret);
1973 }
1974 #endif
1975
1976 /*
1977 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1978 * #define je_malloc malloc
1979 */
1980 #define malloc_is_malloc 1
1981 #define is_malloc_(a) malloc_is_ ## a
1982 #define is_malloc(a) is_malloc_(a)
1983
1984 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1985 /*
1986 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1987 * to inconsistently reference libc's malloc(3)-compatible functions
1988 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1989 *
1990 * These definitions interpose hooks in glibc. The functions are actually
1991 * passed an extra argument for the caller return address, which will be
1992 * ignored.
1993 */
1994 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1995 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1996 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1997 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1998 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
1999 je_memalign;
2000 # endif
2001 #endif
2002
2003 /*
2004 * End non-standard override functions.
2005 */
2006 /******************************************************************************/
2007 /*
2008 * Begin non-standard functions.
2009 */
2010
2011 JEMALLOC_ALWAYS_INLINE_C bool
2012 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
2013 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
2014 {
2015
2016 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
2017 *alignment = 0;
2018 *usize = s2u(size);
2019 } else {
2020 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2021 *usize = sa2u(size, *alignment);
2022 }
2023 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2024 return (true);
2025 *zero = MALLOCX_ZERO_GET(flags);
2026 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2027 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2028 *tcache = NULL;
2029 else
2030 *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2031 } else
2032 *tcache = tcache_get(tsd, true);
2033 if ((flags & MALLOCX_ARENA_MASK) != 0) {
2034 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2035 *arena = arena_get(arena_ind, true);
2036 if (unlikely(*arena == NULL))
2037 return (true);
2038 } else
2039 *arena = NULL;
2040 return (false);
2041 }
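/*
 * Flag decoding example (in terms of the public MALLOCX_* macros): a call
 * such as mallocx(size, MALLOCX_ALIGN(64) | MALLOCX_ZERO) decodes to
 * alignment = 64 and zero = true with the thread's automatic tcache and no
 * explicit arena, while MALLOCX_TCACHE_NONE disables tcache use and
 * MALLOCX_ARENA(i) directs the allocation to arena i.
 */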
2042
2043 JEMALLOC_ALWAYS_INLINE_C bool
2044 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
2045 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
2046 {
2047
2048 if (likely(flags == 0)) {
2049 *usize = s2u(size);
2050 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2051 return (true);
2052 *alignment = 0;
2053 *zero = false;
2054 *tcache = tcache_get(tsd, true);
2055 *arena = NULL;
2056 return (false);
2057 } else {
2058 return (imallocx_flags_decode_hard(tsd, size, flags, usize,
2059 alignment, zero, tcache, arena));
2060 }
2061 }
2062
2063 JEMALLOC_ALWAYS_INLINE_C void *
2064 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2065 tcache_t *tcache, arena_t *arena)
2066 {
2067 szind_t ind;
2068
2069 if (unlikely(alignment != 0))
2070 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
2071 ind = size2index(usize);
2072 assert(ind < NSIZES);
2073 if (unlikely(zero))
2074 return (icalloct(tsd, usize, ind, tcache, arena));
2075 return (imalloct(tsd, usize, ind, tcache, arena));
2076 }
2077
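/*
 * Allocate a sampled object for heap profiling.  Requests that would land in
 * a small size class are promoted to LARGE_MINCLASS so that
 * arena_prof_promoted() can record the originally requested usize.
 */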
2078 static void *
2079 imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2080 tcache_t *tcache, arena_t *arena)
2081 {
2082 void *p;
2083
2084 if (usize <= SMALL_MAXCLASS) {
2085 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
2086 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
2087 p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
2088 arena);
2089 if (p == NULL)
2090 return (NULL);
2091 arena_prof_promoted(p, usize);
2092 } else
2093 p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
2094
2095 return (p);
2096 }
2097
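/*
 * mallocx() allocation path used when heap profiling is enabled: decode the
 * flags, let prof_alloc_prep() decide whether this allocation is sampled,
 * allocate accordingly, and record the resulting prof_tctx_t via
 * prof_malloc().
 */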
2098 JEMALLOC_ALWAYS_INLINE_C void *
2099 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2100 {
2101 void *p;
2102 size_t alignment;
2103 bool zero;
2104 tcache_t *tcache;
2105 arena_t *arena;
2106 prof_tctx_t *tctx;
2107
2108 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2109 &zero, &tcache, &arena)))
2110 return (NULL);
2111 tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2112 if (likely((uintptr_t)tctx == (uintptr_t)1U))
2113 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2114 else if ((uintptr_t)tctx > (uintptr_t)1U) {
2115 p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2116 arena);
2117 } else
2118 p = NULL;
2119 if (unlikely(p == NULL)) {
2120 prof_alloc_rollback(tsd, tctx, true);
2121 return (NULL);
2122 }
2123 prof_malloc(p, *usize, tctx);
2124
2125 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2126 return (p);
2127 }
2128
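/*
 * mallocx() allocation path used when heap profiling is disabled.  The
 * flags == 0 fast path skips full flag decoding and only computes usize when
 * statistics or Valgrind integration require it.
 */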
2129 JEMALLOC_ALWAYS_INLINE_C void *
2130 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2131 {
2132 void *p;
2133 size_t alignment;
2134 bool zero;
2135 tcache_t *tcache;
2136 arena_t *arena;
2137
2138 if (likely(flags == 0)) {
2139 szind_t ind = size2index(size);
2140 if (unlikely(ind >= NSIZES))
2141 return (NULL);
2142 if (config_stats || (config_valgrind &&
2143 unlikely(in_valgrind))) {
2144 *usize = index2size(ind);
2145 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
2146 }
2147 return (imalloc(tsd, size, ind, true));
2148 }
2149
2150 if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2151 &alignment, &zero, &tcache, &arena)))
2152 return (NULL);
2153 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2154 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2155 return (p);
2156 }
2157
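/*
 * Entry point for the non-standard mallocx() API.  Illustrative call (not
 * compiled here; MALLOCX_ALIGN() and MALLOCX_ZERO are the flag-construction
 * macros from the public jemalloc header):
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * requests at least 4096 zeroed bytes aligned to a 64-byte boundary.
 */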
2158 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2159 void JEMALLOC_NOTHROW *
2160 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2161 je_mallocx(size_t size, int flags)
2162 {
2163 tsd_t *tsd;
2164 void *p;
2165 size_t usize;
2166
2167 assert(size != 0);
2168
2169 if (unlikely(malloc_init()))
2170 goto label_oom;
2171 tsd = tsd_fetch();
2172
2173 if (config_prof && opt_prof)
2174 p = imallocx_prof(tsd, size, flags, &usize);
2175 else
2176 p = imallocx_no_prof(tsd, size, flags, &usize);
2177 if (unlikely(p == NULL))
2178 goto label_oom;
2179
2180 if (config_stats) {
2181 assert(usize == isalloc(p, config_prof));
2182 *tsd_thread_allocatedp_get(tsd) += usize;
2183 }
2184 UTRACE(0, size, p);
2185 JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2186 return (p);
2187 label_oom:
2188 if (config_xmalloc && unlikely(opt_xmalloc)) {
2189 malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2190 abort();
2191 }
2192 UTRACE(0, size, 0);
2193 return (NULL);
2194 }
2195
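/*
 * Reallocation counterpart of imallocx_prof_sample(): sampled objects that
 * would land in a small size class are promoted to LARGE_MINCLASS so that
 * arena_prof_promoted() can track the requested usize.
 */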
2196 static void *
2197 irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2198 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2199 prof_tctx_t *tctx)
2200 {
2201 void *p;
2202
2203 if (tctx == NULL)
2204 return (NULL);
2205 if (usize <= SMALL_MAXCLASS) {
2206 p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2207 zero, tcache, arena);
2208 if (p == NULL)
2209 return (NULL);
2210 arena_prof_promoted(p, usize);
2211 } else {
2212 p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2213 tcache, arena);
2214 }
2215
2216 return (p);
2217 }
2218
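/*
 * rallocx() path used when heap profiling is enabled: pick the sampled or
 * plain reallocation route, then report the result to prof_realloc().
 */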
2219 JEMALLOC_ALWAYS_INLINE_C void *
2220 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2221 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2222 arena_t *arena)
2223 {
2224 void *p;
2225 bool prof_active;
2226 prof_tctx_t *old_tctx, *tctx;
2227
2228 prof_active = prof_active_get_unlocked();
2229 old_tctx = prof_tctx_get(old_ptr);
2230 tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
2231 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2232 p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2233 alignment, zero, tcache, arena, tctx);
2234 } else {
2235 p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2236 tcache, arena);
2237 }
2238 if (unlikely(p == NULL)) {
2239 prof_alloc_rollback(tsd, tctx, true);
2240 return (NULL);
2241 }
2242
2243 if (p == old_ptr && alignment != 0) {
2244 /*
2245 * The allocation did not move, so it is possible that the size
2246 * class is smaller than would guarantee the requested
2247 * alignment, and that the alignment constraint was
2248 * serendipitously satisfied. Additionally, old_usize may not
2249 * be the same as the current usize because of in-place large
2250 * reallocation. Therefore, query the actual value of usize.
2251 */
2252 *usize = isalloc(p, config_prof);
2253 }
2254 prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
2255 old_usize, old_tctx);
2256
2257 return (p);
2258 }
2259
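/*
 * Entry point for the non-standard rallocx() API.  Illustrative call
 * (hypothetical caller; new_size is a placeholder):
 *
 *	void *q = rallocx(p, new_size, MALLOCX_ALIGN(64));
 *	if (q != NULL)
 *		p = q;
 *
 * On failure rallocx() returns NULL and leaves the original allocation
 * untouched.
 */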
2260 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2261 void JEMALLOC_NOTHROW *
2262 JEMALLOC_ALLOC_SIZE(2)
2263 je_rallocx(void *ptr, size_t size, int flags)
2264 {
2265 void *p;
2266 tsd_t *tsd;
2267 size_t usize;
2268 size_t old_usize;
2269 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2270 size_t alignment = MALLOCX_ALIGN_GET(flags);
2271 bool zero = flags & MALLOCX_ZERO;
2272 arena_t *arena;
2273 tcache_t *tcache;
2274
2275 assert(ptr != NULL);
2276 assert(size != 0);
2277 assert(malloc_initialized() || IS_INITIALIZER);
2278 malloc_thread_init();
2279 tsd = tsd_fetch();
2280
2281 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2282 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2283 arena = arena_get(arena_ind, true);
2284 if (unlikely(arena == NULL))
2285 goto label_oom;
2286 } else
2287 arena = NULL;
2288
2289 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2290 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2291 tcache = NULL;
2292 else
2293 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2294 } else
2295 tcache = tcache_get(tsd, true);
2296
2297 old_usize = isalloc(ptr, config_prof);
2298 if (config_valgrind && unlikely(in_valgrind))
2299 old_rzsize = u2rz(old_usize);
2300
2301 if (config_prof && opt_prof) {
2302 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2303 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
2304 goto label_oom;
2305 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2306 zero, tcache, arena);
2307 if (unlikely(p == NULL))
2308 goto label_oom;
2309 } else {
2310 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2311 tcache, arena);
2312 if (unlikely(p == NULL))
2313 goto label_oom;
2314 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2315 usize = isalloc(p, config_prof);
2316 }
2317 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2318
2319 if (config_stats) {
2320 *tsd_thread_allocatedp_get(tsd) += usize;
2321 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2322 }
2323 UTRACE(ptr, size, p);
2324 JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2325 old_rzsize, false, zero);
2326 return (p);
2327 label_oom:
2328 if (config_xmalloc && unlikely(opt_xmalloc)) {
2329 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2330 abort();
2331 }
2332 UTRACE(ptr, size, 0);
2333 return (NULL);
2334 }
2335
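/*
 * Try to resize the allocation in place via ixalloc(); on failure the
 * allocation is unchanged and old_usize is returned.
 */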
2336 JEMALLOC_ALWAYS_INLINE_C size_t
2337 ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2338 size_t extra, size_t alignment, bool zero)
2339 {
2340 size_t usize;
2341
2342 if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
2343 return (old_usize);
2344 usize = isalloc(ptr, config_prof);
2345
2346 return (usize);
2347 }
2348
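/*
 * Wrapper around ixallocx_helper() for the profiling path; bail out without
 * resizing when no valid sample context is available.
 */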
2349 static size_t
2350 ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2351 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
2352 {
2353 size_t usize;
2354
2355 if (tctx == NULL)
2356 return (old_usize);
2357 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
2358 zero);
2359
2360 return (usize);
2361 }
2362
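/*
 * xallocx() path used when heap profiling is enabled; see the comment inside
 * regarding the usize_max estimate passed to prof_alloc_prep().
 */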
2363 JEMALLOC_ALWAYS_INLINE_C size_t
2364 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2365 size_t extra, size_t alignment, bool zero)
2366 {
2367 size_t usize_max, usize;
2368 bool prof_active;
2369 prof_tctx_t *old_tctx, *tctx;
2370
2371 prof_active = prof_active_get_unlocked();
2372 old_tctx = prof_tctx_get(ptr);
2373 /*
2374 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2375 * Therefore, compute its maximum possible value and use that in
2376 * prof_alloc_prep() to decide whether to capture a backtrace.
2377 * prof_realloc() will use the actual usize to decide whether to sample.
2378 */
2379 if (alignment == 0) {
2380 usize_max = s2u(size+extra);
2381 assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
2382 } else {
2383 usize_max = sa2u(size+extra, alignment);
2384 if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
2385 /*
2386 * usize_max is out of range, and chances are that
2387 * allocation will fail, but use the maximum possible
2388 * value and carry on with prof_alloc_prep(), just in
2389 * case allocation succeeds.
2390 */
2391 usize_max = HUGE_MAXCLASS;
2392 }
2393 }
2394 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2395
2396 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2397 usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
2398 alignment, zero, tctx);
2399 } else {
2400 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2401 alignment, zero);
2402 }
2403 if (usize == old_usize) {
2404 prof_alloc_rollback(tsd, tctx, false);
2405 return (usize);
2406 }
2407 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2408 old_tctx);
2409
2410 return (usize);
2411 }
2412
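/*
 * Entry point for the non-standard xallocx() API, which resizes an
 * allocation strictly in place.  Illustrative idiom (hypothetical caller):
 *
 *	size_t usize = xallocx(p, size, extra, 0);
 *	if (usize < size)
 *		... in-place growth failed; fall back to rallocx() ...
 *
 * The return value is the resulting usable size, which equals the old usable
 * size when no resize was performed.
 */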
2413 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2414 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2415 {
2416 tsd_t *tsd;
2417 size_t usize, old_usize;
2418 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2419 size_t alignment = MALLOCX_ALIGN_GET(flags);
2420 bool zero = flags & MALLOCX_ZERO;
2421
2422 assert(ptr != NULL);
2423 assert(size != 0);
2424 assert(SIZE_T_MAX - size >= extra);
2425 assert(malloc_initialized() || IS_INITIALIZER);
2426 malloc_thread_init();
2427 tsd = tsd_fetch();
2428
2429 old_usize = isalloc(ptr, config_prof);
2430
2431 /*
2432 * The API explicitly absolves itself of protecting against (size +
2433 * extra) numerical overflow, but we may need to clamp extra to avoid
2434 * exceeding HUGE_MAXCLASS.
2435 *
2436 * Ordinarily, size limit checking is handled deeper down, but here we
2437 * have to check as part of (size + extra) clamping, since we need the
2438 * clamped value in the above helper functions.
2439 */
2440 if (unlikely(size > HUGE_MAXCLASS)) {
2441 usize = old_usize;
2442 goto label_not_resized;
2443 }
2444 if (unlikely(HUGE_MAXCLASS - size < extra))
2445 extra = HUGE_MAXCLASS - size;
2446
2447 if (config_valgrind && unlikely(in_valgrind))
2448 old_rzsize = u2rz(old_usize);
2449
2450 if (config_prof && opt_prof) {
2451 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2452 alignment, zero);
2453 } else {
2454 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2455 alignment, zero);
2456 }
2457 if (unlikely(usize == old_usize))
2458 goto label_not_resized;
2459
2460 if (config_stats) {
2461 *tsd_thread_allocatedp_get(tsd) += usize;
2462 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2463 }
2464 JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2465 old_rzsize, false, zero);
2466 label_not_resized:
2467 UTRACE(ptr, size, ptr);
2468 return (usize);
2469 }
2470
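/*
 * Return the usable size of the allocation pointed to by ptr; the flags
 * argument is not consulted here.  When ivsalloc is configured, the lookup
 * also verifies that ptr was issued by jemalloc.
 */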
2471 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2472 JEMALLOC_ATTR(pure)
2473 je_sallocx(const void *ptr, int flags)
2474 {
2475 size_t usize;
2476
2477 assert(malloc_initialized() || IS_INITIALIZER);
2478 malloc_thread_init();
2479
2480 if (config_ivsalloc)
2481 usize = ivsalloc(ptr, config_prof);
2482 else
2483 usize = isalloc(ptr, config_prof);
2484
2485 return (usize);
2486 }
2487
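/*
 * Deallocate through the non-standard dallocx() API, honoring any
 * MALLOCX_TCACHE() selection encoded in flags.
 */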
2488 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2489 je_dallocx(void *ptr, int flags)
2490 {
2491 tsd_t *tsd;
2492 tcache_t *tcache;
2493
2494 assert(ptr != NULL);
2495 assert(malloc_initialized() || IS_INITIALIZER);
2496
2497 tsd = tsd_fetch();
2498 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2499 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2500 tcache = NULL;
2501 else
2502 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2503 } else
2504 tcache = tcache_get(tsd, false);
2505
2506 UTRACE(ptr, 0, 0);
	ifree(tsd, ptr, tcache, true);
2508 }
2509
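/*
 * Compute the usable size that an allocation with the given size and flags
 * would have, without performing an allocation.
 */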
2510 JEMALLOC_ALWAYS_INLINE_C size_t
2511 inallocx(size_t size, int flags)
2512 {
2513 size_t usize;
2514
2515 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2516 usize = s2u(size);
2517 else
2518 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2519 return (usize);
2520 }
2521
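/*
 * Sized deallocation: the caller guarantees that size (combined with flags)
 * matches the original request, so the usable size can be recomputed via
 * inallocx() rather than looked up.
 */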
2522 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2523 je_sdallocx(void *ptr, size_t size, int flags)
2524 {
2525 tsd_t *tsd;
2526 tcache_t *tcache;
2527 size_t usize;
2528
2529 assert(ptr != NULL);
2530 assert(malloc_initialized() || IS_INITIALIZER);
2531 usize = inallocx(size, flags);
2532 assert(usize == isalloc(ptr, config_prof));
2533
2534 tsd = tsd_fetch();
2535 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2536 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2537 tcache = NULL;
2538 else
2539 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2540 } else
2541 tcache = tcache_get(tsd, false);
2542
2543 UTRACE(ptr, 0, 0);
2544 isfree(tsd, ptr, usize, tcache);
2545 }
2546
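/*
 * Return the usable size that mallocx(size, flags) would produce, without
 * allocating.
 */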
2547 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2548 JEMALLOC_ATTR(pure)
2549 je_nallocx(size_t size, int flags)
2550 {
2551 size_t usize;
2552
2553 assert(size != 0);
2554
2555 if (unlikely(malloc_init()))
2556 return (0);
2557
2558 usize = inallocx(size, flags);
2559 if (unlikely(usize > HUGE_MAXCLASS))
2560 return (0);
2561
2562 return (usize);
2563 }
2564
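/*
 * The mallctl*() entry points are thin wrappers around the ctl module;
 * mallctlnametomib()/mallctlbymib() split name translation from the actual
 * operation so that hot paths can cache the MIB.  Illustrative use of the
 * public name (hypothetical caller):
 *
 *	size_t allocated, len = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0)
 *		... allocated holds the bytes currently allocated ...
 */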
2565 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2566 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2567 size_t newlen)
2568 {
2569
2570 if (unlikely(malloc_init()))
2571 return (EAGAIN);
2572
2573 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2574 }
2575
2576 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2577 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2578 {
2579
2580 if (unlikely(malloc_init()))
2581 return (EAGAIN);
2582
2583 return (ctl_nametomib(name, mibp, miblenp));
2584 }
2585
2586 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2587 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2588 void *newp, size_t newlen)
2589 {
2590
2591 if (unlikely(malloc_init()))
2592 return (EAGAIN);
2593
2594 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2595 }
2596
2597 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2598 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2599 const char *opts)
2600 {
2601
2602 stats_print(write_cb, cbopaque, opts);
2603 }
2604
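/*
 * malloc_usable_size(3)-compatible entry point.
 */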
2605 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2606 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2607 {
2608 size_t ret;
2609
2610 assert(malloc_initialized() || IS_INITIALIZER);
2611 malloc_thread_init();
2612
2613 if (config_ivsalloc)
2614 ret = ivsalloc(ptr, config_prof);
2615 else
2616 ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
2617
2618 return (ret);
2619 }
2620
2621 /*
2622 * End non-standard functions.
2623 */
2624 /******************************************************************************/
2625 /*
2626 * The following functions are used by threading libraries for protection of
2627 * malloc during fork().
2628 */
2629
2630 /*
2631 * If an application creates a thread before doing any allocation in the main
2632 * thread, then calls fork(2) in the main thread followed by memory allocation
2633 * in the child process, a race can occur that results in deadlock within the
2634 * child: the main thread may have forked while the created thread had
2635 * partially initialized the allocator. Ordinarily jemalloc prevents
2636 * fork/malloc races via the following functions it registers during
2637 * initialization using pthread_atfork(), but of course that does no good if
2638 * the allocator isn't fully initialized at fork time. The following library
2639 * constructor is a partial solution to this problem. It may still be possible
2640 * to trigger the deadlock described above, but doing so would involve forking
2641 * via a library constructor that runs before jemalloc's runs.
2642 */
2643 JEMALLOC_ATTR(constructor)
2644 static void
2645 jemalloc_constructor(void)
2646 {
2647
2648 malloc_init();
2649 }
2650
2651 #ifndef JEMALLOC_MUTEX_INIT_CB
2652 void
2653 jemalloc_prefork(void)
2654 #else
2655 JEMALLOC_EXPORT void
2656 _malloc_prefork(void)
2657 #endif
2658 {
2659 unsigned i, narenas;
2660
2661 #ifdef JEMALLOC_MUTEX_INIT_CB
2662 if (!malloc_initialized())
2663 return;
2664 #endif
2665 assert(malloc_initialized());
2666
2667 /* Acquire all mutexes in a safe order. */
2668 ctl_prefork();
2669 prof_prefork();
2670 malloc_mutex_prefork(&arenas_lock);
2671 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2672 arena_t *arena;
2673
2674 if ((arena = arena_get(i, false)) != NULL)
2675 arena_prefork(arena);
2676 }
2677 chunk_prefork();
2678 base_prefork();
2679 }
2680
2681 #ifndef JEMALLOC_MUTEX_INIT_CB
2682 void
2683 jemalloc_postfork_parent(void)
2684 #else
2685 JEMALLOC_EXPORT void
2686 _malloc_postfork(void)
2687 #endif
2688 {
2689 unsigned i, narenas;
2690
2691 #ifdef JEMALLOC_MUTEX_INIT_CB
2692 if (!malloc_initialized())
2693 return;
2694 #endif
2695 assert(malloc_initialized());
2696
2697 /* Release all mutexes, now that fork() has completed. */
2698 base_postfork_parent();
2699 chunk_postfork_parent();
2700 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2701 arena_t *arena;
2702
2703 if ((arena = arena_get(i, false)) != NULL)
2704 arena_postfork_parent(arena);
2705 }
2706 malloc_mutex_postfork_parent(&arenas_lock);
2707 prof_postfork_parent();
2708 ctl_postfork_parent();
2709 }
2710
2711 void
2712 jemalloc_postfork_child(void)
2713 {
2714 unsigned i, narenas;
2715
2716 assert(malloc_initialized());
2717
2718 /* Release all mutexes, now that fork() has completed. */
2719 base_postfork_child();
2720 chunk_postfork_child();
2721 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2722 arena_t *arena;
2723
2724 if ((arena = arena_get(i, false)) != NULL)
2725 arena_postfork_child(arena);
2726 }
2727 malloc_mutex_postfork_child(&arenas_lock);
2728 prof_postfork_child();
2729 ctl_postfork_child();
2730 }
2731
2732 /******************************************************************************/
2733
2734 /* ANDROID extension */
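/*
 * Return arena 0 (asserts that it has been initialized).
 */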
arena_t *
a0get(void)
{
2737 assert(a0 != NULL);
2738 return (a0);
2739 }
2740
2741 #include "android_je_iterate.c"
2742 #include "android_je_mallinfo.c"
2743 /* End ANDROID extension */
2744