1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/log.h"
12 #include "jemalloc/internal/malloc_io.h"
13 #include "jemalloc/internal/mutex.h"
14 #include "jemalloc/internal/rtree.h"
15 #include "jemalloc/internal/size_classes.h"
16 #include "jemalloc/internal/spin.h"
17 #include "jemalloc/internal/sz.h"
18 #include "jemalloc/internal/ticker.h"
19 #include "jemalloc/internal/util.h"
20
21 /******************************************************************************/
22 /* Data. */
23
24 /* Runtime configuration options. */
25 const char *je_malloc_conf
26 #ifndef _WIN32
27 JEMALLOC_ATTR(weak)
28 #endif
29 ;
30 bool opt_abort =
31 #ifdef JEMALLOC_DEBUG
32 true
33 #else
34 false
35 #endif
36 ;
37 bool opt_abort_conf =
38 #ifdef JEMALLOC_DEBUG
39 true
40 #else
41 false
42 #endif
43 ;
44 const char *opt_junk =
45 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
46 "true"
47 #else
48 "false"
49 #endif
50 ;
51 bool opt_junk_alloc =
52 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
53 true
54 #else
55 false
56 #endif
57 ;
58 bool opt_junk_free =
59 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
60 true
61 #else
62 false
63 #endif
64 ;
65
66 bool opt_utrace = false;
67 bool opt_xmalloc = false;
68 bool opt_zero = false;
69 unsigned opt_narenas = 0;
70
71 unsigned ncpus;
72
73 /* Protects arenas initialization. */
74 malloc_mutex_t arenas_lock;
75 /*
76 * Arenas that are used to service external requests. Not all elements of the
77 * arenas array are necessarily used; arenas are created lazily as needed.
78 *
79 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
80 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
81 * takes some action to create them and allocate from them.
82 *
83 * Points to an arena_t.
84 */
85 JEMALLOC_ALIGNED(CACHELINE)
86 atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
87 static atomic_u_t narenas_total; /* Use narenas_total_*(). */
88 static arena_t *a0; /* arenas[0]; read-only after initialization. */
89 unsigned narenas_auto; /* Read-only after initialization. */
90
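/*
 * Initialization states, ordered so that the fully initialized state is zero;
 * the common-case check in malloc_init() then reduces to a single
 * compare-against-zero branch (hence the "jnz" note below).
 */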
91 typedef enum {
92 malloc_init_uninitialized = 3,
93 malloc_init_a0_initialized = 2,
94 malloc_init_recursible = 1,
95 malloc_init_initialized = 0 /* Common case --> jnz. */
96 } malloc_init_t;
97 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
98
99 /* False should be the common case. Set to true to trigger initialization. */
100 bool malloc_slow = true;
101
102 /* When malloc_slow is true, set the corresponding bits for sanity check. */
103 enum {
104 flag_opt_junk_alloc = (1U),
105 flag_opt_junk_free = (1U << 1),
106 flag_opt_zero = (1U << 2),
107 flag_opt_utrace = (1U << 3),
108 flag_opt_xmalloc = (1U << 4)
109 };
110 static uint8_t malloc_slow_flags;
111
112 #ifdef JEMALLOC_THREADED_INIT
113 /* Used to let the initializing thread recursively allocate. */
114 # define NO_INITIALIZER ((pthread_t)0)
115 # define INITIALIZER pthread_self()
116 # define IS_INITIALIZER (malloc_initializer == pthread_self())
117 static pthread_t malloc_initializer = NO_INITIALIZER;
118 #else
119 # define NO_INITIALIZER false
120 # define INITIALIZER true
121 # define IS_INITIALIZER malloc_initializer
122 static bool malloc_initializer = NO_INITIALIZER;
123 #endif
124
125 /* Used to avoid initialization races. */
126 #ifdef _WIN32
127 #if _WIN32_WINNT >= 0x0600
128 static malloc_mutex_t init_lock = SRWLOCK_INIT;
129 #else
130 static malloc_mutex_t init_lock;
131 static bool init_lock_initialized = false;
132
133 JEMALLOC_ATTR(constructor)
134 static void WINAPI
135 _init_init_lock(void) {
136 /*
137 * If another constructor in the same binary is using mallctl to e.g.
138 * set up extent hooks, it may end up running before this one, and
139 * malloc_init_hard will crash trying to lock the uninitialized lock. So
140 * we force an initialization of the lock in malloc_init_hard as well.
141 * We don't try to care about atomicity of the accesses to the
142 * init_lock_initialized boolean, since it really only matters early in
143 * the process creation, before any separate thread normally starts
144 * doing anything.
145 */
146 if (!init_lock_initialized) {
147 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
148 malloc_mutex_rank_exclusive);
149 }
150 init_lock_initialized = true;
151 }
152
153 #ifdef _MSC_VER
154 # pragma section(".CRT$XCU", read)
155 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
156 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
157 #endif
158 #endif
159 #else
160 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
161 #endif
162
163 typedef struct {
164 void *p; /* Input pointer (as in realloc(p, s)). */
165 size_t s; /* Request size. */
166 void *r; /* Result pointer. */
167 } malloc_utrace_t;
168
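/*
 * When opt_utrace is enabled, UTRACE() records an (input ptr, request size,
 * result ptr) triple via utrace(2), preserving errno across the trace call.
 * Without JEMALLOC_UTRACE it compiles away entirely.
 */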
169 #ifdef JEMALLOC_UTRACE
170 # define UTRACE(a, b, c) do { \
171 if (unlikely(opt_utrace)) { \
172 int utrace_serrno = errno; \
173 malloc_utrace_t ut; \
174 ut.p = (a); \
175 ut.s = (b); \
176 ut.r = (c); \
177 utrace(&ut, sizeof(ut)); \
178 errno = utrace_serrno; \
179 } \
180 } while (0)
181 #else
182 # define UTRACE(a, b, c)
183 #endif
184
185 /* Whether any invalid config options were encountered. */
186 static bool had_conf_error = false;
187
188 /******************************************************************************/
189 /*
190 * Function prototypes for static functions that are referenced prior to
191 * definition.
192 */
193
194 static bool malloc_init_hard_a0(void);
195 static bool malloc_init_hard(void);
196
197 /******************************************************************************/
198 /*
199 * Begin miscellaneous support functions.
200 */
201
202 bool
203 malloc_initialized(void) {
204 return (malloc_init_state == malloc_init_initialized);
205 }
206
207 JEMALLOC_ALWAYS_INLINE bool
208 malloc_init_a0(void) {
209 if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
210 return malloc_init_hard_a0();
211 }
212 return false;
213 }
214
215 JEMALLOC_ALWAYS_INLINE bool
216 malloc_init(void) {
217 if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
218 return true;
219 }
220 return false;
221 }
222
223 /*
224 * The a0*() functions are used instead of i{d,}alloc() in situations that
225 * cannot tolerate TLS variable access.
226 */
227
228 static void *
229 a0ialloc(size_t size, bool zero, bool is_internal) {
230 if (unlikely(malloc_init_a0())) {
231 return NULL;
232 }
233
234 return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
235 is_internal, arena_get(TSDN_NULL, 0, true), true);
236 }
237
238 static void
239 a0idalloc(void *ptr, bool is_internal) {
240 idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
241 }
242
243 void *
244 a0malloc(size_t size) {
245 return a0ialloc(size, false, true);
246 }
247
248 void
249 a0dalloc(void *ptr) {
250 a0idalloc(ptr, true);
251 }
252
253 /*
254 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
255 * situations that cannot tolerate TLS variable access (TLS allocation and very
256 * early internal data structure initialization).
257 */
258
259 void *
260 bootstrap_malloc(size_t size) {
261 if (unlikely(size == 0)) {
262 size = 1;
263 }
264
265 return a0ialloc(size, false, false);
266 }
267
268 void *
269 bootstrap_calloc(size_t num, size_t size) {
270 size_t num_size;
271
272 num_size = num * size;
273 if (unlikely(num_size == 0)) {
274 assert(num == 0 || size == 0);
275 num_size = 1;
276 }
277
278 return a0ialloc(num_size, true, false);
279 }
280
281 void
282 bootstrap_free(void *ptr) {
283 if (unlikely(ptr == NULL)) {
284 return;
285 }
286
287 a0idalloc(ptr, false);
288 }
289
290 void
291 arena_set(unsigned ind, arena_t *arena) {
292 atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
293 }
294
295 static void
296 narenas_total_set(unsigned narenas) {
297 atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
298 }
299
300 static void
301 narenas_total_inc(void) {
302 atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
303 }
304
305 unsigned
306 narenas_total_get(void) {
307 return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
308 }
309
310 /* Create a new arena and insert it into the arenas array at index ind. */
311 static arena_t *
312 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
313 arena_t *arena;
314
315 assert(ind <= narenas_total_get());
316 if (ind >= MALLOCX_ARENA_LIMIT) {
317 return NULL;
318 }
319 if (ind == narenas_total_get()) {
320 narenas_total_inc();
321 }
322
323 /*
324 * Another thread may have already initialized arenas[ind] if it's an
325 * auto arena.
326 */
327 arena = arena_get(tsdn, ind, false);
328 if (arena != NULL) {
329 assert(ind < narenas_auto);
330 return arena;
331 }
332
333 /* Actually initialize the arena. */
334 arena = arena_new(tsdn, ind, extent_hooks);
335
336 return arena;
337 }
338
339 static void
340 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
341 if (ind == 0) {
342 return;
343 }
344 if (have_background_thread) {
345 bool err;
346 malloc_mutex_lock(tsdn, &background_thread_lock);
347 err = background_thread_create(tsdn_tsd(tsdn), ind);
348 malloc_mutex_unlock(tsdn, &background_thread_lock);
349 if (err) {
350 malloc_printf("<jemalloc>: error in background thread "
351 "creation for arena %u. Abort.\n", ind);
352 abort();
353 }
354 }
355 }
356
357 arena_t *
358 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
359 arena_t *arena;
360
361 malloc_mutex_lock(tsdn, &arenas_lock);
362 arena = arena_init_locked(tsdn, ind, extent_hooks);
363 malloc_mutex_unlock(tsdn, &arenas_lock);
364
365 arena_new_create_background_thread(tsdn, ind);
366
367 return arena;
368 }
369
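/*
 * arena_bind()/arena_unbind()/arena_migrate() below keep tsd's arena pointers
 * and each arena's thread counts in sync as threads attach to, detach from,
 * or switch between arenas (internal selects the metadata arena binding).
 */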
370 static void
371 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
372 arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
373 arena_nthreads_inc(arena, internal);
374
375 if (internal) {
376 tsd_iarena_set(tsd, arena);
377 } else {
378 tsd_arena_set(tsd, arena);
379 }
380 }
381
382 void
383 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
384 arena_t *oldarena, *newarena;
385
386 oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
387 newarena = arena_get(tsd_tsdn(tsd), newind, false);
388 arena_nthreads_dec(oldarena, false);
389 arena_nthreads_inc(newarena, false);
390 tsd_arena_set(tsd, newarena);
391 }
392
393 static void
394 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
395 arena_t *arena;
396
397 arena = arena_get(tsd_tsdn(tsd), ind, false);
398 arena_nthreads_dec(arena, internal);
399
400 if (internal) {
401 tsd_iarena_set(tsd, NULL);
402 } else {
403 tsd_arena_set(tsd, NULL);
404 }
405 }
406
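/*
 * Slow path for arena_tdata_get(): (re)allocate the calling thread's
 * arenas_tdata array (per-arena decay tickers) so that it covers arena index
 * ind, copying existing tickers and freeing any outgrown array on return.
 */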
407 arena_tdata_t *
408 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
409 arena_tdata_t *tdata, *arenas_tdata_old;
410 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
411 unsigned narenas_tdata_old, i;
412 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
413 unsigned narenas_actual = narenas_total_get();
414
415 /*
416 * Dissociate old tdata array (and set up for deallocation upon return)
417 * if it's too small.
418 */
419 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
420 arenas_tdata_old = arenas_tdata;
421 narenas_tdata_old = narenas_tdata;
422 arenas_tdata = NULL;
423 narenas_tdata = 0;
424 tsd_arenas_tdata_set(tsd, arenas_tdata);
425 tsd_narenas_tdata_set(tsd, narenas_tdata);
426 } else {
427 arenas_tdata_old = NULL;
428 narenas_tdata_old = 0;
429 }
430
431 /* Allocate tdata array if it's missing. */
432 if (arenas_tdata == NULL) {
433 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
434 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
435
436 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
437 *arenas_tdata_bypassp = true;
438 arenas_tdata = (arena_tdata_t *)a0malloc(
439 sizeof(arena_tdata_t) * narenas_tdata);
440 *arenas_tdata_bypassp = false;
441 }
442 if (arenas_tdata == NULL) {
443 tdata = NULL;
444 goto label_return;
445 }
446 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
447 tsd_arenas_tdata_set(tsd, arenas_tdata);
448 tsd_narenas_tdata_set(tsd, narenas_tdata);
449 }
450
451 /*
452 * Copy to tdata array. It's possible that the actual number of arenas
453 * has increased since narenas_total_get() was called above, but that
454 * causes no correctness issues unless two threads concurrently execute
455 * the arenas.create mallctl, which we trust mallctl synchronization to
456 * prevent.
457 */
458
459 /* Copy/initialize tickers. */
460 for (i = 0; i < narenas_actual; i++) {
461 if (i < narenas_tdata_old) {
462 ticker_copy(&arenas_tdata[i].decay_ticker,
463 &arenas_tdata_old[i].decay_ticker);
464 } else {
465 ticker_init(&arenas_tdata[i].decay_ticker,
466 DECAY_NTICKS_PER_UPDATE);
467 }
468 }
469 if (narenas_tdata > narenas_actual) {
470 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
471 * (narenas_tdata - narenas_actual));
472 }
473
474 /* Read the refreshed tdata array. */
475 tdata = &arenas_tdata[ind];
476 label_return:
477 if (arenas_tdata_old != NULL) {
478 a0dalloc(arenas_tdata_old);
479 }
480 return tdata;
481 }
482
483 /* Slow path, called only by arena_choose(). */
484 arena_t *
485 arena_choose_hard(tsd_t *tsd, bool internal) {
486 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
487
488 if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
489 unsigned choose = percpu_arena_choose();
490 ret = arena_get(tsd_tsdn(tsd), choose, true);
491 assert(ret != NULL);
492 arena_bind(tsd, arena_ind_get(ret), false);
493 arena_bind(tsd, arena_ind_get(ret), true);
494
495 return ret;
496 }
497
498 if (narenas_auto > 1) {
499 unsigned i, j, choose[2], first_null;
500 bool is_new_arena[2];
501
502 /*
503 * Determine binding for both non-internal and internal
504 * allocation.
505 *
506 * choose[0]: For application allocation.
507 * choose[1]: For internal metadata allocation.
508 */
509
510 for (j = 0; j < 2; j++) {
511 choose[j] = 0;
512 is_new_arena[j] = false;
513 }
514
515 first_null = narenas_auto;
516 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
517 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
518 for (i = 1; i < narenas_auto; i++) {
519 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
520 /*
521 * Choose the first arena that has the lowest
522 * number of threads assigned to it.
523 */
524 for (j = 0; j < 2; j++) {
525 if (arena_nthreads_get(arena_get(
526 tsd_tsdn(tsd), i, false), !!j) <
527 arena_nthreads_get(arena_get(
528 tsd_tsdn(tsd), choose[j], false),
529 !!j)) {
530 choose[j] = i;
531 }
532 }
533 } else if (first_null == narenas_auto) {
534 /*
535 * Record the index of the first uninitialized
536 * arena, in case all extant arenas are in use.
537 *
538 * NB: It is possible for there to be
539 * discontinuities in terms of initialized
540 * versus uninitialized arenas, due to the
541 * "thread.arena" mallctl.
542 */
543 first_null = i;
544 }
545 }
546
547 for (j = 0; j < 2; j++) {
548 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
549 choose[j], false), !!j) == 0 || first_null ==
550 narenas_auto) {
551 /*
552 * Use an unloaded arena, or the least loaded
553 * arena if all arenas are already initialized.
554 */
555 if (!!j == internal) {
556 ret = arena_get(tsd_tsdn(tsd),
557 choose[j], false);
558 }
559 } else {
560 arena_t *arena;
561
562 /* Initialize a new arena. */
563 choose[j] = first_null;
564 arena = arena_init_locked(tsd_tsdn(tsd),
565 choose[j],
566 (extent_hooks_t *)&extent_hooks_default);
567 if (arena == NULL) {
568 malloc_mutex_unlock(tsd_tsdn(tsd),
569 &arenas_lock);
570 return NULL;
571 }
572 is_new_arena[j] = true;
573 if (!!j == internal) {
574 ret = arena;
575 }
576 }
577 arena_bind(tsd, choose[j], !!j);
578 }
579 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
580
581 for (j = 0; j < 2; j++) {
582 if (is_new_arena[j]) {
583 assert(choose[j] > 0);
584 arena_new_create_background_thread(
585 tsd_tsdn(tsd), choose[j]);
586 }
587 }
588
589 } else {
590 ret = arena_get(tsd_tsdn(tsd), 0, false);
591 arena_bind(tsd, 0, false);
592 arena_bind(tsd, 0, true);
593 }
594
595 return ret;
596 }
597
598 void
599 iarena_cleanup(tsd_t *tsd) {
600 arena_t *iarena;
601
602 iarena = tsd_iarena_get(tsd);
603 if (iarena != NULL) {
604 arena_unbind(tsd, arena_ind_get(iarena), true);
605 }
606 }
607
608 void
609 arena_cleanup(tsd_t *tsd) {
610 arena_t *arena;
611
612 arena = tsd_arena_get(tsd);
613 if (arena != NULL) {
614 arena_unbind(tsd, arena_ind_get(arena), false);
615 }
616 }
617
618 void
619 arenas_tdata_cleanup(tsd_t *tsd) {
620 arena_tdata_t *arenas_tdata;
621
622 /* Prevent tsd->arenas_tdata from being (re)created. */
623 *tsd_arenas_tdata_bypassp_get(tsd) = true;
624
625 arenas_tdata = tsd_arenas_tdata_get(tsd);
626 if (arenas_tdata != NULL) {
627 tsd_arenas_tdata_set(tsd, NULL);
628 a0dalloc(arenas_tdata);
629 }
630 }
631
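/*
 * Registered via atexit(3) from malloc_init_hard_a0_locked() when
 * opt_stats_print is enabled: merge per-thread tcache stats into their arenas
 * and print final statistics at process exit.
 */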
632 static void
633 stats_print_atexit(void) {
634 if (config_stats) {
635 tsdn_t *tsdn;
636 unsigned narenas, i;
637
638 tsdn = tsdn_fetch();
639
640 /*
641 * Merge stats from extant threads. This is racy, since
642 * individual threads do not lock when recording tcache stats
643 * events. As a consequence, the final stats may be slightly
644 * out of date by the time they are reported, if other threads
645 * continue to allocate.
646 */
647 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
648 arena_t *arena = arena_get(tsdn, i, false);
649 if (arena != NULL) {
650 tcache_t *tcache;
651
652 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
653 ql_foreach(tcache, &arena->tcache_ql, link) {
654 tcache_stats_merge(tsdn, tcache, arena);
655 }
656 malloc_mutex_unlock(tsdn,
657 &arena->tcache_ql_mtx);
658 }
659 }
660 }
661 je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
662 }
663
664 /*
665 * Ensure that we don't hold any locks upon entry to or exit from allocator
666 * code (in a "broad" sense that doesn't count a reentrant allocation as an
667 * entrance or exit).
668 */
669 JEMALLOC_ALWAYS_INLINE void
670 check_entry_exit_locking(tsdn_t *tsdn) {
671 if (!config_debug) {
672 return;
673 }
674 if (tsdn_null(tsdn)) {
675 return;
676 }
677 tsd_t *tsd = tsdn_tsd(tsdn);
678 /*
679 * It's possible we hold locks at entry/exit if we're in a nested
680 * allocation.
681 */
682 int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
683 if (reentrancy_level != 0) {
684 return;
685 }
686 witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
687 }
688
689 /*
690 * End miscellaneous support functions.
691 */
692 /******************************************************************************/
693 /*
694 * Begin initialization functions.
695 */
696
697 static char *
698 jemalloc_secure_getenv(const char *name) {
699 #ifdef JEMALLOC_HAVE_SECURE_GETENV
700 return secure_getenv(name);
701 #else
702 # ifdef JEMALLOC_HAVE_ISSETUGID
703 if (issetugid() != 0) {
704 return NULL;
705 }
706 # endif
707 return getenv(name);
708 #endif
709 }
710
711 static unsigned
712 malloc_ncpus(void) {
713 long result;
714
715 #ifdef _WIN32
716 SYSTEM_INFO si;
717 GetSystemInfo(&si);
718 result = si.dwNumberOfProcessors;
719 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
720 /*
721 * glibc >= 2.6 has the CPU_COUNT macro.
722 *
723 * glibc's sysconf() uses isspace(). glibc allocates for the first time
724 * *before* setting up the isspace tables. Therefore we need a
725 * different method to get the number of CPUs.
726 */
727 {
728 cpu_set_t set;
729
730 pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
731 result = CPU_COUNT(&set);
732 }
733 #else
734 result = sysconf(_SC_NPROCESSORS_ONLN);
735 #endif
736 return ((result == -1) ? 1 : (unsigned)result);
737 }
738
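/*
 * Append the recognized single-character flags in v (the value of the
 * "stats_print_opts" conf option) to opt_stats_print_opts, skipping unknown
 * and duplicate characters.
 */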
739 static void
740 init_opt_stats_print_opts(const char *v, size_t vlen) {
741 size_t opts_len = strlen(opt_stats_print_opts);
742 assert(opts_len <= stats_print_tot_num_options);
743
744 for (size_t i = 0; i < vlen; i++) {
745 switch (v[i]) {
746 #define OPTION(o, v, d, s) case o: break;
747 STATS_PRINT_OPTIONS
748 #undef OPTION
749 default: continue;
750 }
751
752 if (strchr(opt_stats_print_opts, v[i]) != NULL) {
753 /* Ignore repeated. */
754 continue;
755 }
756
757 opt_stats_print_opts[opts_len++] = v[i];
758 opt_stats_print_opts[opts_len] = '\0';
759 assert(opts_len <= stats_print_tot_num_options);
760 }
761 assert(opts_len == strlen(opt_stats_print_opts));
762 }
763
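/*
 * Extract the next key:value pair from *opts_p.  A conf string is a
 * comma-separated list of pairs, e.g. "abort:true,narenas:4".  On success the
 * key and value are returned as (pointer, length) pairs into the original
 * string, *opts_p is advanced past the pair, and false is returned; true is
 * returned once the string is exhausted or a malformed pair is encountered.
 */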
764 static bool
765 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
766 char const **v_p, size_t *vlen_p) {
767 bool accept;
768 const char *opts = *opts_p;
769
770 *k_p = opts;
771
772 for (accept = false; !accept;) {
773 switch (*opts) {
774 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
775 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
776 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
777 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
778 case 'Y': case 'Z':
779 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
780 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
781 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
782 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
783 case 'y': case 'z':
784 case '0': case '1': case '2': case '3': case '4': case '5':
785 case '6': case '7': case '8': case '9':
786 case '_':
787 opts++;
788 break;
789 case ':':
790 opts++;
791 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
792 *v_p = opts;
793 accept = true;
794 break;
795 case '\0':
796 if (opts != *opts_p) {
797 malloc_write("<jemalloc>: Conf string ends "
798 "with key\n");
799 }
800 return true;
801 default:
802 malloc_write("<jemalloc>: Malformed conf string\n");
803 return true;
804 }
805 }
806
807 for (accept = false; !accept;) {
808 switch (*opts) {
809 case ',':
810 opts++;
811 /*
812 * Look ahead one character here, because the next time
813 * this function is called, it will assume that end of
814 * input has been cleanly reached if no input remains,
815 * but we have optimistically already consumed the
816 * comma if one exists.
817 */
818 if (*opts == '\0') {
819 malloc_write("<jemalloc>: Conf string ends "
820 "with comma\n");
821 }
822 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
823 accept = true;
824 break;
825 case '\0':
826 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
827 accept = true;
828 break;
829 default:
830 opts++;
831 break;
832 }
833 }
834
835 *opts_p = opts;
836 return false;
837 }
838
839 static void
840 malloc_abort_invalid_conf(void) {
841 assert(opt_abort_conf);
842 malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
843 "value (see above).\n");
844 abort();
845 }
846
847 static void
848 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
849 size_t vlen) {
850 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
851 (int)vlen, v);
852 /* If abort_conf is set, error out after processing all options. */
853 had_conf_error = true;
854 }
855
856 static void
857 malloc_slow_flag_init(void) {
858 /*
859 * Combine the runtime options into malloc_slow for fast path. Called
860 * after processing all the options.
861 */
862 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
863 | (opt_junk_free ? flag_opt_junk_free : 0)
864 | (opt_zero ? flag_opt_zero : 0)
865 | (opt_utrace ? flag_opt_utrace : 0)
866 | (opt_xmalloc ? flag_opt_xmalloc : 0);
867
868 malloc_slow = (malloc_slow_flags != 0);
869 }
870
871 static void
872 malloc_conf_init(void) {
873 unsigned i;
874 char buf[PATH_MAX + 1];
875 const char *opts, *k, *v;
876 size_t klen, vlen;
877
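/*
 * Configuration is gathered from up to four sources, processed in order so
 * that later sources override earlier ones for options they both set:
 *   0) the compile-time default string (config_malloc_conf),
 *   1) the application-provided je_malloc_conf global,
 *   2) the name of the /etc/malloc.conf symbolic link (non-Windows only),
 *   3) the MALLOC_CONF environment variable.
 */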
878 #if defined(__BIONIC__)
879 /* For Android, do not look at files or environment variables for
880 * config data.
881 */
882 for (i = 0; i < 2; i++) {
883 #else
884 for (i = 0; i < 4; i++) {
885 #endif
886 /* Get runtime configuration. */
887 switch (i) {
888 case 0:
889 opts = config_malloc_conf;
890 break;
891 case 1:
892 if (je_malloc_conf != NULL) {
893 /*
894 * Use options that were compiled into the
895 * program.
896 */
897 opts = je_malloc_conf;
898 } else {
899 /* No configuration specified. */
900 buf[0] = '\0';
901 opts = buf;
902 }
903 break;
904 case 2: {
905 ssize_t linklen = 0;
906 #ifndef _WIN32
907 int saved_errno = errno;
908 const char *linkname =
909 # ifdef JEMALLOC_PREFIX
910 "/etc/"JEMALLOC_PREFIX"malloc.conf"
911 # else
912 "/etc/malloc.conf"
913 # endif
914 ;
915
916 /*
917 * Try to use the contents of the "/etc/malloc.conf"
918 * symbolic link's name.
919 */
920 linklen = readlink(linkname, buf, sizeof(buf) - 1);
921 if (linklen == -1) {
922 /* No configuration specified. */
923 linklen = 0;
924 /* Restore errno. */
925 set_errno(saved_errno);
926 }
927 #endif
928 buf[linklen] = '\0';
929 opts = buf;
930 break;
931 } case 3: {
932 const char *envname =
933 #ifdef JEMALLOC_PREFIX
934 JEMALLOC_CPREFIX"MALLOC_CONF"
935 #else
936 "MALLOC_CONF"
937 #endif
938 ;
939
940 if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
941 /*
942 * Do nothing; opts is already initialized to
943 * the value of the MALLOC_CONF environment
944 * variable.
945 */
946 } else {
947 /* No configuration specified. */
948 buf[0] = '\0';
949 opts = buf;
950 }
951 break;
952 } default:
953 not_reached();
954 buf[0] = '\0';
955 opts = buf;
956 }
957
958 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
959 &vlen)) {
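/*
 * The CONF_* helper macros below compare the parsed key against a known
 * option name and, on a match, validate the value, store it into the
 * corresponding opt_* variable, and continue with the next key:value pair.
 */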
960 #define CONF_MATCH(n) \
961 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
962 #define CONF_MATCH_VALUE(n) \
963 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
964 #define CONF_HANDLE_BOOL(o, n) \
965 if (CONF_MATCH(n)) { \
966 if (CONF_MATCH_VALUE("true")) { \
967 o = true; \
968 } else if (CONF_MATCH_VALUE("false")) { \
969 o = false; \
970 } else { \
971 malloc_conf_error( \
972 "Invalid conf value", \
973 k, klen, v, vlen); \
974 } \
975 continue; \
976 }
977 #define CONF_MIN_no(um, min) false
978 #define CONF_MIN_yes(um, min) ((um) < (min))
979 #define CONF_MAX_no(um, max) false
980 #define CONF_MAX_yes(um, max) ((um) > (max))
981 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
982 if (CONF_MATCH(n)) { \
983 uintmax_t um; \
984 char *end; \
985 \
986 set_errno(0); \
987 um = malloc_strtoumax(v, &end, 0); \
988 if (get_errno() != 0 || (uintptr_t)end -\
989 (uintptr_t)v != vlen) { \
990 malloc_conf_error( \
991 "Invalid conf value", \
992 k, klen, v, vlen); \
993 } else if (clip) { \
994 if (CONF_MIN_##check_min(um, \
995 (t)(min))) { \
996 o = (t)(min); \
997 } else if ( \
998 CONF_MAX_##check_max(um, \
999 (t)(max))) { \
1000 o = (t)(max); \
1001 } else { \
1002 o = (t)um; \
1003 } \
1004 } else { \
1005 if (CONF_MIN_##check_min(um, \
1006 (t)(min)) || \
1007 CONF_MAX_##check_max(um, \
1008 (t)(max))) { \
1009 malloc_conf_error( \
1010 "Out-of-range " \
1011 "conf value", \
1012 k, klen, v, vlen); \
1013 } else { \
1014 o = (t)um; \
1015 } \
1016 } \
1017 continue; \
1018 }
1019 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
1020 clip) \
1021 CONF_HANDLE_T_U(unsigned, o, n, min, max, \
1022 check_min, check_max, clip)
1023 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
1024 CONF_HANDLE_T_U(size_t, o, n, min, max, \
1025 check_min, check_max, clip)
1026 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1027 if (CONF_MATCH(n)) { \
1028 long l; \
1029 char *end; \
1030 \
1031 set_errno(0); \
1032 l = strtol(v, &end, 0); \
1033 if (get_errno() != 0 || (uintptr_t)end -\
1034 (uintptr_t)v != vlen) { \
1035 malloc_conf_error( \
1036 "Invalid conf value", \
1037 k, klen, v, vlen); \
1038 } else if (l < (ssize_t)(min) || l > \
1039 (ssize_t)(max)) { \
1040 malloc_conf_error( \
1041 "Out-of-range conf value", \
1042 k, klen, v, vlen); \
1043 } else { \
1044 o = l; \
1045 } \
1046 continue; \
1047 }
1048 #define CONF_HANDLE_CHAR_P(o, n, d) \
1049 if (CONF_MATCH(n)) { \
1050 size_t cpylen = (vlen <= \
1051 sizeof(o)-1) ? vlen : \
1052 sizeof(o)-1; \
1053 strncpy(o, v, cpylen); \
1054 o[cpylen] = '\0'; \
1055 continue; \
1056 }
1057
1058 CONF_HANDLE_BOOL(opt_abort, "abort")
1059 CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1060 if (strncmp("metadata_thp", k, klen) == 0) {
1061 int i;
1062 bool match = false;
1063 for (i = 0; i < metadata_thp_mode_limit; i++) {
1064 if (strncmp(metadata_thp_mode_names[i],
1065 v, vlen) == 0) {
1066 opt_metadata_thp = i;
1067 match = true;
1068 break;
1069 }
1070 }
1071 if (!match) {
1072 malloc_conf_error("Invalid conf value",
1073 k, klen, v, vlen);
1074 }
1075 continue;
1076 }
1077 CONF_HANDLE_BOOL(opt_retain, "retain")
1078 if (strncmp("dss", k, klen) == 0) {
1079 int i;
1080 bool match = false;
1081 for (i = 0; i < dss_prec_limit; i++) {
1082 if (strncmp(dss_prec_names[i], v, vlen)
1083 == 0) {
1084 if (extent_dss_prec_set(i)) {
1085 malloc_conf_error(
1086 "Error setting dss",
1087 k, klen, v, vlen);
1088 } else {
1089 opt_dss =
1090 dss_prec_names[i];
1091 match = true;
1092 break;
1093 }
1094 }
1095 }
1096 if (!match) {
1097 malloc_conf_error("Invalid conf value",
1098 k, klen, v, vlen);
1099 }
1100 continue;
1101 }
1102 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1103 UINT_MAX, yes, no, false)
1104 CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1105 "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1106 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1107 SSIZE_MAX);
1108 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1109 "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1110 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1111 SSIZE_MAX);
1112 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1113 if (CONF_MATCH("stats_print_opts")) {
1114 init_opt_stats_print_opts(v, vlen);
1115 continue;
1116 }
1117 if (config_fill) {
1118 if (CONF_MATCH("junk")) {
1119 if (CONF_MATCH_VALUE("true")) {
1120 opt_junk = "true";
1121 opt_junk_alloc = opt_junk_free =
1122 true;
1123 } else if (CONF_MATCH_VALUE("false")) {
1124 opt_junk = "false";
1125 opt_junk_alloc = opt_junk_free =
1126 false;
1127 } else if (CONF_MATCH_VALUE("alloc")) {
1128 opt_junk = "alloc";
1129 opt_junk_alloc = true;
1130 opt_junk_free = false;
1131 } else if (CONF_MATCH_VALUE("free")) {
1132 opt_junk = "free";
1133 opt_junk_alloc = false;
1134 opt_junk_free = true;
1135 } else {
1136 malloc_conf_error(
1137 "Invalid conf value", k,
1138 klen, v, vlen);
1139 }
1140 continue;
1141 }
1142 CONF_HANDLE_BOOL(opt_zero, "zero")
1143 }
1144 if (config_utrace) {
1145 CONF_HANDLE_BOOL(opt_utrace, "utrace")
1146 }
1147 if (config_xmalloc) {
1148 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1149 }
1150 CONF_HANDLE_BOOL(opt_tcache, "tcache")
1151 CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1152 "lg_extent_max_active_fit", 0,
1153 (sizeof(size_t) << 3), yes, yes, false)
1154 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1155 -1, (sizeof(size_t) << 3) - 1)
1156 if (strncmp("percpu_arena", k, klen) == 0) {
1157 bool match = false;
1158 for (int i = percpu_arena_mode_names_base; i <
1159 percpu_arena_mode_names_limit; i++) {
1160 if (strncmp(percpu_arena_mode_names[i],
1161 v, vlen) == 0) {
1162 if (!have_percpu_arena) {
1163 malloc_conf_error(
1164 "No getcpu support",
1165 k, klen, v, vlen);
1166 }
1167 opt_percpu_arena = i;
1168 match = true;
1169 break;
1170 }
1171 }
1172 if (!match) {
1173 malloc_conf_error("Invalid conf value",
1174 k, klen, v, vlen);
1175 }
1176 continue;
1177 }
1178 CONF_HANDLE_BOOL(opt_background_thread,
1179 "background_thread");
1180 CONF_HANDLE_SIZE_T(opt_max_background_threads,
1181 "max_background_threads", 1,
1182 opt_max_background_threads, yes, yes,
1183 true);
1184 if (config_prof) {
1185 CONF_HANDLE_BOOL(opt_prof, "prof")
1186 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1187 "prof_prefix", "jeprof")
1188 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1189 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1190 "prof_thread_active_init")
1191 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1192 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1193 - 1, no, yes, true)
1194 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1195 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1196 "lg_prof_interval", -1,
1197 (sizeof(uint64_t) << 3) - 1)
1198 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1199 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1200 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1201 }
1202 if (config_log) {
1203 if (CONF_MATCH("log")) {
1204 size_t cpylen = (
1205 vlen <= sizeof(log_var_names) - 1 ?
1206 vlen : sizeof(log_var_names) - 1);
1207 strncpy(log_var_names, v, cpylen);
1208 log_var_names[cpylen] = '\0';
1209 continue;
1210 }
1211 }
1212 if (CONF_MATCH("thp")) {
1213 bool match = false;
1214 for (int i = 0; i < thp_mode_names_limit; i++) {
1215 if (strncmp(thp_mode_names[i], v, vlen)
1216 == 0) {
1217 if (!have_madvise_huge) {
1218 malloc_conf_error(
1219 "No THP support",
1220 k, klen, v, vlen);
1221 }
1222 opt_thp = i;
1223 match = true;
1224 break;
1225 }
1226 }
1227 if (!match) {
1228 malloc_conf_error("Invalid conf value",
1229 k, klen, v, vlen);
1230 }
1231 continue;
1232 }
1233 malloc_conf_error("Invalid conf pair", k, klen, v,
1234 vlen);
1235 #undef CONF_MATCH
1236 #undef CONF_MATCH_VALUE
1237 #undef CONF_HANDLE_BOOL
1238 #undef CONF_MIN_no
1239 #undef CONF_MIN_yes
1240 #undef CONF_MAX_no
1241 #undef CONF_MAX_yes
1242 #undef CONF_HANDLE_T_U
1243 #undef CONF_HANDLE_UNSIGNED
1244 #undef CONF_HANDLE_SIZE_T
1245 #undef CONF_HANDLE_SSIZE_T
1246 #undef CONF_HANDLE_CHAR_P
1247 }
1248 if (opt_abort_conf && had_conf_error) {
1249 malloc_abort_invalid_conf();
1250 }
1251 }
1252 atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1253 }
1254
1255 static bool
1256 malloc_init_hard_needed(void) {
1257 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1258 malloc_init_recursible)) {
1259 /*
1260 * Another thread initialized the allocator before this one
1261 * acquired init_lock, or this thread is the initializing
1262 * thread, and it is recursively allocating.
1263 */
1264 return false;
1265 }
1266 #ifdef JEMALLOC_THREADED_INIT
1267 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1268 /* Busy-wait until the initializing thread completes. */
1269 spin_t spinner = SPIN_INITIALIZER;
1270 do {
1271 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1272 spin_adaptive(&spinner);
1273 malloc_mutex_lock(TSDN_NULL, &init_lock);
1274 } while (!malloc_initialized());
1275 return false;
1276 }
1277 #endif
1278 return true;
1279 }
1280
1281 static bool
1282 malloc_init_hard_a0_locked() {
1283 malloc_initializer = INITIALIZER;
1284
1285 if (config_prof) {
1286 prof_boot0();
1287 }
1288 malloc_conf_init();
1289 if (opt_stats_print) {
1290 /* Print statistics at exit. */
1291 if (atexit(stats_print_atexit) != 0) {
1292 malloc_write("<jemalloc>: Error in atexit()\n");
1293 if (opt_abort) {
1294 abort();
1295 }
1296 }
1297 }
1298 if (pages_boot()) {
1299 return true;
1300 }
1301 if (base_boot(TSDN_NULL)) {
1302 return true;
1303 }
1304 if (extent_boot()) {
1305 return true;
1306 }
1307 if (ctl_boot()) {
1308 return true;
1309 }
1310 if (config_prof) {
1311 prof_boot1();
1312 }
1313 arena_boot();
1314 if (tcache_boot(TSDN_NULL)) {
1315 return true;
1316 }
1317 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1318 malloc_mutex_rank_exclusive)) {
1319 return true;
1320 }
1321 /*
1322 * Create enough scaffolding to allow recursive allocation in
1323 * malloc_ncpus().
1324 */
1325 narenas_auto = 1;
1326 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1327 /*
1328 * Initialize one arena here. The rest are lazily created in
1329 * arena_choose_hard().
1330 */
1331 if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1332 == NULL) {
1333 return true;
1334 }
1335 a0 = arena_get(TSDN_NULL, 0, false);
1336 malloc_init_state = malloc_init_a0_initialized;
1337
1338 return false;
1339 }
1340
1341 static bool
1342 malloc_init_hard_a0(void) {
1343 bool ret;
1344
1345 malloc_mutex_lock(TSDN_NULL, &init_lock);
1346 ret = malloc_init_hard_a0_locked();
1347 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1348 return ret;
1349 }
1350
1351 /* Initialize data structures which may trigger recursive allocation. */
1352 static bool
1353 malloc_init_hard_recursible(void) {
1354 malloc_init_state = malloc_init_recursible;
1355
1356 #if defined(__BIONIC__) && defined(ANDROID_NUM_ARENAS)
1357 /* Hardcode since this value won't be used. */
1358 ncpus = 2;
1359 #else
1360 ncpus = malloc_ncpus();
1361 #endif
1362
1363 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1364 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1365 !defined(__native_client__))
1366 /* LinuxThreads' pthread_atfork() allocates. */
1367 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1368 jemalloc_postfork_child) != 0) {
1369 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1370 if (opt_abort) {
1371 abort();
1372 }
1373 return true;
1374 }
1375 #endif
1376
1377 if (background_thread_boot0()) {
1378 return true;
1379 }
1380
1381 return false;
1382 }
1383
1384 static unsigned
1385 malloc_narenas_default(void) {
1386 #if defined(ANDROID_NUM_ARENAS)
1387 return ANDROID_NUM_ARENAS;
1388 #else
1389 assert(ncpus > 0);
1390 /*
1391 * For SMP systems, create more than one arena per CPU by
1392 * default.
1393 */
1394 if (ncpus > 1) {
1395 return ncpus << 2;
1396 } else {
1397 return 1;
1398 }
1399 #endif
1400 }
1401
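/*
 * Translate the user-facing percpu_arena setting into its "enabled" runtime
 * counterpart once boot proceeds; the disabled setting is left unchanged.
 */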
1402 static percpu_arena_mode_t
1403 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1404 assert(!malloc_initialized());
1405 assert(mode <= percpu_arena_disabled);
1406
1407 if (mode != percpu_arena_disabled) {
1408 mode += percpu_arena_mode_enabled_base;
1409 }
1410
1411 return mode;
1412 }
1413
1414 static bool
1415 malloc_init_narenas(void) {
1416 assert(ncpus > 0);
1417
1418 if (opt_percpu_arena != percpu_arena_disabled) {
1419 if (!have_percpu_arena || malloc_getcpu() < 0) {
1420 opt_percpu_arena = percpu_arena_disabled;
1421 malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1422 "available. Setting narenas to %u.\n", opt_narenas ?
1423 opt_narenas : malloc_narenas_default());
1424 if (opt_abort) {
1425 abort();
1426 }
1427 } else {
1428 if (ncpus >= MALLOCX_ARENA_LIMIT) {
1429 malloc_printf("<jemalloc>: narenas w/ percpu "
1430 "arena beyond limit (%d)\n", ncpus);
1431 if (opt_abort) {
1432 abort();
1433 }
1434 return true;
1435 }
1436 /* NB: opt_percpu_arena isn't fully initialized yet. */
1437 if (percpu_arena_as_initialized(opt_percpu_arena) ==
1438 per_phycpu_arena && ncpus % 2 != 0) {
1439 malloc_printf("<jemalloc>: invalid "
1440 "configuration -- per physical CPU arena "
1441 "with odd number (%u) of CPUs (no hyper "
1442 "threading?).\n", ncpus);
1443 if (opt_abort)
1444 abort();
1445 }
1446 unsigned n = percpu_arena_ind_limit(
1447 percpu_arena_as_initialized(opt_percpu_arena));
1448 if (opt_narenas < n) {
1449 /*
1450 * If narenas is specified with percpu_arena
1451 * enabled, actual narenas is set as the greater
1452 * of the two. percpu_arena_choose will be free
1453 * to use any of the arenas based on CPU
1454 * id. This is conservative (at a small cost)
1455 * but ensures correctness.
1456 *
1457 * If for some reason the ncpus determined at
1458 * boot is not the actual number (e.g. because
1459 * of affinity setting from numactl), reserving
1460 * narenas this way provides a workaround for
1461 * percpu_arena.
1462 */
1463 opt_narenas = n;
1464 }
1465 }
1466 }
1467 if (opt_narenas == 0) {
1468 opt_narenas = malloc_narenas_default();
1469 }
1470 assert(opt_narenas > 0);
1471
1472 narenas_auto = opt_narenas;
1473 /*
1474 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1475 */
1476 if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1477 narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1478 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1479 narenas_auto);
1480 }
1481 narenas_total_set(narenas_auto);
1482
1483 return false;
1484 }
1485
1486 static void
1487 malloc_init_percpu(void) {
1488 opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1489 }
1490
1491 static bool
1492 malloc_init_hard_finish(void) {
1493 if (malloc_mutex_boot()) {
1494 return true;
1495 }
1496
1497 malloc_init_state = malloc_init_initialized;
1498 malloc_slow_flag_init();
1499
1500 return false;
1501 }
1502
1503 static void
1504 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1505 malloc_mutex_assert_owner(tsdn, &init_lock);
1506 malloc_mutex_unlock(tsdn, &init_lock);
1507 if (reentrancy_set) {
1508 assert(!tsdn_null(tsdn));
1509 tsd_t *tsd = tsdn_tsd(tsdn);
1510 assert(tsd_reentrancy_level_get(tsd) > 0);
1511 post_reentrancy(tsd);
1512 }
1513 }
1514
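/*
 * Full initialization path, taken the first time an allocation entry point
 * runs: bootstrap arena 0 under init_lock, boot tsd, then re-acquire the lock
 * with the reentrancy level raised to size the arena array and boot
 * profiling, and finally mark the allocator initialized before optionally
 * spawning background threads.
 */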
1515 static bool
1516 malloc_init_hard(void) {
1517 tsd_t *tsd;
1518
1519 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1520 _init_init_lock();
1521 #endif
1522 malloc_mutex_lock(TSDN_NULL, &init_lock);
1523
1524 #define UNLOCK_RETURN(tsdn, ret, reentrancy) \
1525 malloc_init_hard_cleanup(tsdn, reentrancy); \
1526 return ret;
1527
1528 if (!malloc_init_hard_needed()) {
1529 UNLOCK_RETURN(TSDN_NULL, false, false)
1530 }
1531
1532 if (malloc_init_state != malloc_init_a0_initialized &&
1533 malloc_init_hard_a0_locked()) {
1534 UNLOCK_RETURN(TSDN_NULL, true, false)
1535 }
1536
1537 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1538 /* Recursive allocation relies on functional tsd. */
1539 tsd = malloc_tsd_boot0();
1540 if (tsd == NULL) {
1541 return true;
1542 }
1543 if (malloc_init_hard_recursible()) {
1544 return true;
1545 }
1546
1547 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1548 /* Set reentrancy level to 1 during init. */
1549 pre_reentrancy(tsd, NULL);
1550 /* Initialize narenas before prof_boot2 (for allocation). */
1551 if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1552 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1553 }
1554 if (config_prof && prof_boot2(tsd)) {
1555 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1556 }
1557
1558 malloc_init_percpu();
1559
1560 if (malloc_init_hard_finish()) {
1561 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1562 }
1563 post_reentrancy(tsd);
1564 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1565
1566 witness_assert_lockless(witness_tsd_tsdn(
1567 tsd_witness_tsdp_get_unsafe(tsd)));
1568 malloc_tsd_boot1();
1569 /* Update TSD after tsd_boot1. */
1570 tsd = tsd_fetch();
1571 if (opt_background_thread) {
1572 assert(have_background_thread);
1573 /*
1574 * Need to finish init & unlock first before creating background
1575 * threads (pthread_create depends on malloc). ctl_init (which
1576 * sets isthreaded) needs to be called without holding any lock.
1577 */
1578 background_thread_ctl_init(tsd_tsdn(tsd));
1579
1580 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1581 bool err = background_thread_create(tsd, 0);
1582 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1583 if (err) {
1584 return true;
1585 }
1586 }
1587 #undef UNLOCK_RETURN
1588 return false;
1589 }
1590
1591 /*
1592 * End initialization functions.
1593 */
1594 /******************************************************************************/
1595 /*
1596 * Begin allocation-path internal functions and data structures.
1597 */
1598
1599 /*
1600 * Settings determined by the documented behavior of the allocation functions.
1601 */
1602 typedef struct static_opts_s static_opts_t;
1603 struct static_opts_s {
1604 /* Whether or not allocation size may overflow. */
1605 bool may_overflow;
1606 /* Whether or not allocations of size 0 should be treated as size 1. */
1607 bool bump_empty_alloc;
1608 /*
1609 * Whether to assert that allocations are not of size 0 (after any
1610 * bumping).
1611 */
1612 bool assert_nonempty_alloc;
1613
1614 /*
1615 * Whether or not to modify the 'result' argument to malloc in case of
1616 * error.
1617 */
1618 bool null_out_result_on_error;
1619 /* Whether to set errno when we encounter an error condition. */
1620 bool set_errno_on_error;
1621
1622 /*
1623 * The minimum valid alignment for functions requesting aligned storage.
1624 */
1625 size_t min_alignment;
1626
1627 /* The error string to use if we oom. */
1628 const char *oom_string;
1629 /* The error string to use if the passed-in alignment is invalid. */
1630 const char *invalid_alignment_string;
1631
1632 /*
1633 * False if we're configured to skip some time-consuming operations.
1634 *
1635 * This isn't really a malloc "behavior", but it acts as a useful
1636 * summary of several other static (or at least, static after program
1637 * initialization) options.
1638 */
1639 bool slow;
1640 };
1641
1642 JEMALLOC_ALWAYS_INLINE void
1643 static_opts_init(static_opts_t *static_opts) {
1644 static_opts->may_overflow = false;
1645 static_opts->bump_empty_alloc = false;
1646 static_opts->assert_nonempty_alloc = false;
1647 static_opts->null_out_result_on_error = false;
1648 static_opts->set_errno_on_error = false;
1649 static_opts->min_alignment = 0;
1650 static_opts->oom_string = "";
1651 static_opts->invalid_alignment_string = "";
1652 static_opts->slow = false;
1653 }
1654
1655 /*
1656 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
1657 * should have one constant here per magic value there. Note however that the
1658 * representations need not be related.
1659 */
1660 #define TCACHE_IND_NONE ((unsigned)-1)
1661 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1662 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
1663
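/* Options that vary per call, filled in from the entry point's arguments. */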
1664 typedef struct dynamic_opts_s dynamic_opts_t;
1665 struct dynamic_opts_s {
1666 void **result;
1667 size_t num_items;
1668 size_t item_size;
1669 size_t alignment;
1670 bool zero;
1671 unsigned tcache_ind;
1672 unsigned arena_ind;
1673 };
1674
1675 JEMALLOC_ALWAYS_INLINE void
1676 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1677 dynamic_opts->result = NULL;
1678 dynamic_opts->num_items = 0;
1679 dynamic_opts->item_size = 0;
1680 dynamic_opts->alignment = 0;
1681 dynamic_opts->zero = false;
1682 dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1683 dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1684 }
1685
1686 /* ind is ignored if dopts->alignment > 0. */
1687 JEMALLOC_ALWAYS_INLINE void *
1688 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1689 size_t size, size_t usize, szind_t ind) {
1690 tcache_t *tcache;
1691 arena_t *arena;
1692
1693 /* Fill in the tcache. */
1694 if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1695 if (likely(!sopts->slow)) {
1696 /* Getting tcache ptr unconditionally. */
1697 tcache = tsd_tcachep_get(tsd);
1698 assert(tcache == tcache_get(tsd));
1699 } else {
1700 tcache = tcache_get(tsd);
1701 }
1702 } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1703 tcache = NULL;
1704 } else {
1705 tcache = tcaches_get(tsd, dopts->tcache_ind);
1706 }
1707
1708 /* Fill in the arena. */
1709 if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1710 /*
1711 * In case of automatic arena management, we defer arena
1712 * computation until as late as we can, hoping to fill the
1713 * allocation out of the tcache.
1714 */
1715 arena = NULL;
1716 } else {
1717 arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1718 }
1719
1720 if (unlikely(dopts->alignment != 0)) {
1721 return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1722 dopts->zero, tcache, arena);
1723 }
1724
1725 return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1726 arena, sopts->slow);
1727 }
1728
1729 JEMALLOC_ALWAYS_INLINE void *
1730 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1731 size_t usize, szind_t ind) {
1732 void *ret;
1733
1734 /*
1735 * For small allocations, sampling bumps the usize. If so, we allocate
1736 * from the ind_large bucket.
1737 */
1738 szind_t ind_large;
1739 size_t bumped_usize = usize;
1740
1741 if (usize <= SMALL_MAXCLASS) {
1742 assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1743 sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1744 == LARGE_MINCLASS);
1745 ind_large = sz_size2index(LARGE_MINCLASS);
1746 bumped_usize = sz_s2u(LARGE_MINCLASS);
1747 ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1748 bumped_usize, ind_large);
1749 if (unlikely(ret == NULL)) {
1750 return NULL;
1751 }
1752 arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1753 } else {
1754 ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1755 }
1756
1757 return ret;
1758 }
1759
1760 /*
1761 * Returns true if the allocation will overflow, and false otherwise. Sets
1762 * *size to the product either way.
1763 */
1764 JEMALLOC_ALWAYS_INLINE bool
1765 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1766 size_t *size) {
1767 /*
1768 * This function is just num_items * item_size, except that we may have
1769 * to check for overflow.
1770 */
1771
1772 if (!may_overflow) {
1773 assert(dopts->num_items == 1);
1774 *size = dopts->item_size;
1775 return false;
1776 }
1777
1778 /* A size_t with its high-half bits all set to 1. */
1779 static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1780
1781 *size = dopts->item_size * dopts->num_items;
1782
1783 if (unlikely(*size == 0)) {
1784 return (dopts->num_items != 0 && dopts->item_size != 0);
1785 }
1786
1787 /*
1788 * We got a non-zero size, but we don't know if we overflowed to get
1789 * there. To avoid having to do a divide, we'll be clever and note that
1790 * if both A and B can be represented in N/2 bits, then their product
1791 * can be represented in N bits (without the possibility of overflow).
1792 */
1793 if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1794 return false;
1795 }
1796 if (likely(*size / dopts->item_size == dopts->num_items)) {
1797 return false;
1798 }
1799 return true;
1800 }
1801
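/*
 * Shared body of the allocation entry points: compute the request size
 * (checking for overflow), validate alignment, handle profiling sampling,
 * perform the allocation, and do stats/utrace bookkeeping.  Returns 0 on
 * success, or ENOMEM/EINVAL after reporting the error per sopts.
 */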
1802 JEMALLOC_ALWAYS_INLINE int
1803 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1804 /* Where the actual allocated memory will live. */
1805 void *allocation = NULL;
1806 /* Filled in by compute_size_with_overflow below. */
1807 size_t size = 0;
1808 /*
1809 * For unaligned allocations, we need only ind. For aligned
1810 * allocations, or in case of stats or profiling we need usize.
1811 *
1812 * These are actually dead stores, in that their values are reset before
1813 * any branch on their value is taken. Sometimes though, it's
1814 * convenient to pass them as arguments before this point. To avoid
1815 * undefined behavior then, we initialize them with dummy stores.
1816 */
1817 szind_t ind = 0;
1818 size_t usize = 0;
1819
1820 /* Reentrancy is only checked on slow path. */
1821 int8_t reentrancy_level;
1822
1823 /* Compute the amount of memory the user wants. */
1824 if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1825 &size))) {
1826 goto label_oom;
1827 }
1828
1829 /* Validate the user input. */
1830 if (sopts->bump_empty_alloc) {
1831 if (unlikely(size == 0)) {
1832 size = 1;
1833 }
1834 }
1835
1836 if (sopts->assert_nonempty_alloc) {
1837 assert(size != 0);
1838 }
1839
1840 if (unlikely(dopts->alignment < sopts->min_alignment
1841 || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1842 goto label_invalid_alignment;
1843 }
1844
1845 /* This is the beginning of the "core" algorithm. */
1846
1847 if (dopts->alignment == 0) {
1848 ind = sz_size2index(size);
1849 if (unlikely(ind >= NSIZES)) {
1850 goto label_oom;
1851 }
1852 if (config_stats || (config_prof && opt_prof)) {
1853 usize = sz_index2size(ind);
1854 assert(usize > 0 && usize <= LARGE_MAXCLASS);
1855 }
1856 } else {
1857 usize = sz_sa2u(size, dopts->alignment);
1858 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1859 goto label_oom;
1860 }
1861 }
1862
1863 check_entry_exit_locking(tsd_tsdn(tsd));
1864
1865 /*
1866 * If we need to handle reentrancy, we can do it out of a
1867 * known-initialized arena (i.e. arena 0).
1868 */
1869 reentrancy_level = tsd_reentrancy_level_get(tsd);
1870 if (sopts->slow && unlikely(reentrancy_level > 0)) {
1871 /*
1872 * We should never specify particular arenas or tcaches from
1873 * within our internal allocations.
1874 */
1875 assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1876 dopts->tcache_ind == TCACHE_IND_NONE);
1877 assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1878 dopts->tcache_ind = TCACHE_IND_NONE;
1879 /* We know that arena 0 has already been initialized. */
1880 dopts->arena_ind = 0;
1881 }
1882
1883 /* If profiling is on, get our profiling context. */
1884 if (config_prof && opt_prof) {
1885 /*
1886 * Note that if we're going down this path, usize must have been
1887 * initialized in the previous if statement.
1888 */
1889 prof_tctx_t *tctx = prof_alloc_prep(
1890 tsd, usize, prof_active_get_unlocked(), true);
1891
1892 alloc_ctx_t alloc_ctx;
1893 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1894 alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1895 allocation = imalloc_no_sample(
1896 sopts, dopts, tsd, usize, usize, ind);
1897 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
1898 /*
1899 * Note that ind might still be 0 here. This is fine;
1900 * imalloc_sample ignores ind if dopts->alignment > 0.
1901 */
1902 allocation = imalloc_sample(
1903 sopts, dopts, tsd, usize, ind);
1904 alloc_ctx.slab = false;
1905 } else {
1906 allocation = NULL;
1907 }
1908
1909 if (unlikely(allocation == NULL)) {
1910 prof_alloc_rollback(tsd, tctx, true);
1911 goto label_oom;
1912 }
1913 prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1914 } else {
1915 /*
1916 * If dopts->alignment > 0, then ind is still 0, but usize was
1917 * computed in the previous if statement. Down the positive
1918 * alignment path, imalloc_no_sample ignores ind and size
1919 * (relying only on usize).
1920 */
1921 allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1922 ind);
1923 if (unlikely(allocation == NULL)) {
1924 goto label_oom;
1925 }
1926 }
1927
1928 /*
1929 * Allocation has been done at this point. We still have some
1930 * post-allocation work to do though.
1931 */
1932 assert(dopts->alignment == 0
1933 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1934
1935 if (config_stats) {
1936 assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1937 *tsd_thread_allocatedp_get(tsd) += usize;
1938 }
1939
1940 if (sopts->slow) {
1941 UTRACE(0, size, allocation);
1942 }
1943
1944 /* Success! */
1945 check_entry_exit_locking(tsd_tsdn(tsd));
1946 *dopts->result = allocation;
1947 return 0;
1948
1949 label_oom:
1950 if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1951 malloc_write(sopts->oom_string);
1952 abort();
1953 }
1954
1955 if (sopts->slow) {
1956 UTRACE(NULL, size, NULL);
1957 }
1958
1959 check_entry_exit_locking(tsd_tsdn(tsd));
1960
1961 if (sopts->set_errno_on_error) {
1962 set_errno(ENOMEM);
1963 }
1964
1965 if (sopts->null_out_result_on_error) {
1966 *dopts->result = NULL;
1967 }
1968
1969 return ENOMEM;
1970
1971 /*
1972 * This label is only jumped to by one goto; we move it out of line
1973 * anyways to avoid obscuring the non-error paths, and for symmetry with
1974 * the oom case.
1975 */
1976 label_invalid_alignment:
1977 if (config_xmalloc && unlikely(opt_xmalloc)) {
1978 malloc_write(sopts->invalid_alignment_string);
1979 abort();
1980 }
1981
1982 if (sopts->set_errno_on_error) {
1983 set_errno(EINVAL);
1984 }
1985
1986 if (sopts->slow) {
1987 UTRACE(NULL, size, NULL);
1988 }
1989
1990 check_entry_exit_locking(tsd_tsdn(tsd));
1991
1992 if (sopts->null_out_result_on_error) {
1993 *dopts->result = NULL;
1994 }
1995
1996 return EINVAL;
1997 }
1998
1999 /* Returns the errno-style error code of the allocation. */
2000 JEMALLOC_ALWAYS_INLINE int
2001 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
2002 if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
2003 if (config_xmalloc && unlikely(opt_xmalloc)) {
2004 malloc_write(sopts->oom_string);
2005 abort();
2006 }
2007 UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
2008 set_errno(ENOMEM);
2009 *dopts->result = NULL;
2010
2011 return ENOMEM;
2012 }
2013
2014 /* We always need the tsd. Let's grab it right away. */
2015 tsd_t *tsd = tsd_fetch();
2016 assert(tsd);
2017 if (likely(tsd_fast(tsd))) {
2018 /* Fast and common path. */
2019 tsd_assert_fast(tsd);
2020 sopts->slow = false;
2021 return imalloc_body(sopts, dopts, tsd);
2022 } else {
2023 sopts->slow = true;
2024 return imalloc_body(sopts, dopts, tsd);
2025 }
2026 }
2027 /******************************************************************************/
2028 /*
2029 * Begin malloc(3)-compatible functions.
2030 */
2031
2032 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2033 void JEMALLOC_NOTHROW *
2034 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2035 je_malloc(size_t size) {
2036 void *ret;
2037 static_opts_t sopts;
2038 dynamic_opts_t dopts;
2039
2040 LOG("core.malloc.entry", "size: %zu", size);
2041
2042 static_opts_init(&sopts);
2043 dynamic_opts_init(&dopts);
2044
2045 sopts.bump_empty_alloc = true;
2046 sopts.null_out_result_on_error = true;
2047 sopts.set_errno_on_error = true;
2048 sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2049
2050 dopts.result = &ret;
2051 dopts.num_items = 1;
2052 dopts.item_size = size;
2053
2054 imalloc(&sopts, &dopts);
2055
2056 LOG("core.malloc.exit", "result: %p", ret);
2057
2058 return ret;
2059 }
2060
2061 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2062 JEMALLOC_ATTR(nonnull(1))
2063 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2064 int ret;
2065 static_opts_t sopts;
2066 dynamic_opts_t dopts;
2067
2068 LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2069 "size: %zu", memptr, alignment, size);
2070
2071 static_opts_init(&sopts);
2072 dynamic_opts_init(&dopts);
2073
2074 sopts.bump_empty_alloc = true;
2075 sopts.min_alignment = sizeof(void *);
2076 sopts.oom_string =
2077 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2078 sopts.invalid_alignment_string =
2079 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2080
2081 dopts.result = memptr;
2082 dopts.num_items = 1;
2083 dopts.item_size = size;
2084 dopts.alignment = alignment;
2085
2086 ret = imalloc(&sopts, &dopts);
2087
2088 LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2089 *memptr);
2090
2091 return ret;
2092 }
2093
2094 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2095 void JEMALLOC_NOTHROW *
2096 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2097 je_aligned_alloc(size_t alignment, size_t size) {
2098 void *ret;
2099
2100 static_opts_t sopts;
2101 dynamic_opts_t dopts;
2102
	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu",
	    alignment, size);
2105
2106 static_opts_init(&sopts);
2107 dynamic_opts_init(&dopts);
2108
2109 sopts.bump_empty_alloc = true;
2110 sopts.null_out_result_on_error = true;
2111 sopts.set_errno_on_error = true;
2112 sopts.min_alignment = 1;
2113 sopts.oom_string =
2114 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2115 sopts.invalid_alignment_string =
2116 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2117
2118 dopts.result = &ret;
2119 dopts.num_items = 1;
2120 dopts.item_size = size;
2121 dopts.alignment = alignment;
2122
2123 imalloc(&sopts, &dopts);
2124
2125 LOG("core.aligned_alloc.exit", "result: %p", ret);
2126
2127 return ret;
2128 }
2129
2130 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2131 void JEMALLOC_NOTHROW *
2132 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2133 je_calloc(size_t num, size_t size) {
2134 void *ret;
2135 static_opts_t sopts;
2136 dynamic_opts_t dopts;
2137
	LOG("core.calloc.entry", "num: %zu, size: %zu", num, size);
2139
2140 static_opts_init(&sopts);
2141 dynamic_opts_init(&dopts);
2142
2143 sopts.may_overflow = true;
2144 sopts.bump_empty_alloc = true;
2145 sopts.null_out_result_on_error = true;
2146 sopts.set_errno_on_error = true;
2147 sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2148
2149 dopts.result = &ret;
2150 dopts.num_items = num;
2151 dopts.item_size = size;
2152 dopts.zero = true;
2153
2154 imalloc(&sopts, &dopts);
2155
2156 LOG("core.calloc.exit", "result: %p", ret);
2157
2158 return ret;
2159 }
2160
2161 static void *
2162 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2163 prof_tctx_t *tctx) {
2164 void *p;
2165
2166 if (tctx == NULL) {
2167 return NULL;
2168 }
2169 if (usize <= SMALL_MAXCLASS) {
2170 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2171 if (p == NULL) {
2172 return NULL;
2173 }
2174 arena_prof_promote(tsd_tsdn(tsd), p, usize);
2175 } else {
2176 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2177 }
2178
2179 return p;
2180 }
2181
2182 JEMALLOC_ALWAYS_INLINE void *
2183 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2184 alloc_ctx_t *alloc_ctx) {
2185 void *p;
2186 bool prof_active;
2187 prof_tctx_t *old_tctx, *tctx;
2188
2189 prof_active = prof_active_get_unlocked();
2190 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2191 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2192 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2193 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2194 } else {
2195 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2196 }
2197 if (unlikely(p == NULL)) {
2198 prof_alloc_rollback(tsd, tctx, true);
2199 return NULL;
2200 }
2201 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2202 old_tctx);
2203
2204 return p;
2205 }
2206
2207 JEMALLOC_ALWAYS_INLINE void
2208 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2209 if (!slow_path) {
2210 tsd_assert_fast(tsd);
2211 }
2212 check_entry_exit_locking(tsd_tsdn(tsd));
2213 if (tsd_reentrancy_level_get(tsd) != 0) {
2214 assert(slow_path);
2215 }
2216
2217 assert(ptr != NULL);
2218 assert(malloc_initialized() || IS_INITIALIZER);
2219
2220 alloc_ctx_t alloc_ctx;
2221 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2222 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2223 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2224 assert(alloc_ctx.szind != NSIZES);
2225
2226 size_t usize;
2227 if (config_prof && opt_prof) {
2228 usize = sz_index2size(alloc_ctx.szind);
2229 prof_free(tsd, ptr, usize, &alloc_ctx);
2230 } else if (config_stats) {
2231 usize = sz_index2size(alloc_ctx.szind);
2232 }
2233 if (config_stats) {
2234 *tsd_thread_deallocatedp_get(tsd) += usize;
2235 }
2236
2237 if (likely(!slow_path)) {
2238 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2239 false);
2240 } else {
2241 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2242 true);
2243 }
2244 }
2245
2246 JEMALLOC_ALWAYS_INLINE void
2247 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2248 if (!slow_path) {
2249 tsd_assert_fast(tsd);
2250 }
2251 check_entry_exit_locking(tsd_tsdn(tsd));
2252 if (tsd_reentrancy_level_get(tsd) != 0) {
2253 assert(slow_path);
2254 }
2255
2256 assert(ptr != NULL);
2257 assert(malloc_initialized() || IS_INITIALIZER);
2258
2259 alloc_ctx_t alloc_ctx, *ctx;
2260 if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2261 /*
2262 * When cache_oblivious is disabled and ptr is not page aligned,
2263 * the allocation was not sampled -- usize can be used to
2264 * determine szind directly.
2265 */
2266 alloc_ctx.szind = sz_size2index(usize);
2267 alloc_ctx.slab = true;
2268 ctx = &alloc_ctx;
2269 if (config_debug) {
2270 alloc_ctx_t dbg_ctx;
2271 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2272 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2273 rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2274 &dbg_ctx.slab);
2275 assert(dbg_ctx.szind == alloc_ctx.szind);
2276 assert(dbg_ctx.slab == alloc_ctx.slab);
2277 }
2278 } else if (config_prof && opt_prof) {
2279 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2280 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2281 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2282 assert(alloc_ctx.szind == sz_size2index(usize));
2283 ctx = &alloc_ctx;
2284 } else {
2285 ctx = NULL;
2286 }
2287
2288 if (config_prof && opt_prof) {
2289 prof_free(tsd, ptr, usize, ctx);
2290 }
2291 if (config_stats) {
2292 *tsd_thread_deallocatedp_get(tsd) += usize;
2293 }
2294
2295 if (likely(!slow_path)) {
2296 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2297 } else {
2298 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2299 }
2300 }
2301
2302 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2303 void JEMALLOC_NOTHROW *
2304 JEMALLOC_ALLOC_SIZE(2)
2305 je_realloc(void *ptr, size_t size) {
2306 void *ret;
2307 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2308 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2309 size_t old_usize = 0;
2310
	LOG("core.realloc.entry", "ptr: %p, size: %zu", ptr, size);
2312
2313 if (unlikely(size == 0)) {
2314 if (ptr != NULL) {
2315 /* realloc(ptr, 0) is equivalent to free(ptr). */
2316 UTRACE(ptr, 0, 0);
2317 tcache_t *tcache;
2318 tsd_t *tsd = tsd_fetch();
2319 if (tsd_reentrancy_level_get(tsd) == 0) {
2320 tcache = tcache_get(tsd);
2321 } else {
2322 tcache = NULL;
2323 }
2324 ifree(tsd, ptr, tcache, true);
2325
2326 LOG("core.realloc.exit", "result: %p", NULL);
2327 return NULL;
2328 }
2329 size = 1;
2330 }
2331
2332 if (likely(ptr != NULL)) {
2333 assert(malloc_initialized() || IS_INITIALIZER);
2334 tsd_t *tsd = tsd_fetch();
2335
2336 check_entry_exit_locking(tsd_tsdn(tsd));
2337
2338 alloc_ctx_t alloc_ctx;
2339 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2340 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2341 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2342 assert(alloc_ctx.szind != NSIZES);
2343 old_usize = sz_index2size(alloc_ctx.szind);
2344 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2345 if (config_prof && opt_prof) {
2346 usize = sz_s2u(size);
2347 ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2348 NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2349 &alloc_ctx);
2350 } else {
2351 if (config_stats) {
2352 usize = sz_s2u(size);
2353 }
2354 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2355 }
2356 tsdn = tsd_tsdn(tsd);
2357 } else {
2358 /* realloc(NULL, size) is equivalent to malloc(size). */
2359 void *ret = je_malloc(size);
2360 LOG("core.realloc.exit", "result: %p", ret);
2361 return ret;
2362 }
2363
2364 if (unlikely(ret == NULL)) {
2365 if (config_xmalloc && unlikely(opt_xmalloc)) {
2366 malloc_write("<jemalloc>: Error in realloc(): "
2367 "out of memory\n");
2368 abort();
2369 }
2370 set_errno(ENOMEM);
2371 }
2372 if (config_stats && likely(ret != NULL)) {
2373 tsd_t *tsd;
2374
2375 assert(usize == isalloc(tsdn, ret));
2376 tsd = tsdn_tsd(tsdn);
2377 *tsd_thread_allocatedp_get(tsd) += usize;
2378 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2379 }
2380 UTRACE(ptr, size, ret);
2381 check_entry_exit_locking(tsdn);
2382
2383 LOG("core.realloc.exit", "result: %p", ret);
2384 return ret;
2385 }
2386
2387 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2388 je_free(void *ptr) {
2389 LOG("core.free.entry", "ptr: %p", ptr);
2390
2391 UTRACE(ptr, 0, 0);
2392 if (likely(ptr != NULL)) {
2393 /*
2394 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2395 * based on only free() calls -- other activities trigger the
2396 * minimal to full transition. This is because free() may
2397 * happen during thread shutdown after tls deallocation: if a
2398 * thread never had any malloc activities until then, a
2399 * fully-setup tsd won't be destructed properly.
2400 */
2401 tsd_t *tsd = tsd_fetch_min();
2402 check_entry_exit_locking(tsd_tsdn(tsd));
2403
2404 tcache_t *tcache;
2405 if (likely(tsd_fast(tsd))) {
2406 tsd_assert_fast(tsd);
2407 /* Unconditionally get tcache ptr on fast path. */
2408 tcache = tsd_tcachep_get(tsd);
2409 ifree(tsd, ptr, tcache, false);
2410 } else {
2411 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2412 tcache = tcache_get(tsd);
2413 } else {
2414 tcache = NULL;
2415 }
2416 ifree(tsd, ptr, tcache, true);
2417 }
2418 check_entry_exit_locking(tsd_tsdn(tsd));
2419 }
2420 LOG("core.free.exit", "");
2421 }
2422
2423 /*
2424 * End malloc(3)-compatible functions.
2425 */
2426 /******************************************************************************/
2427 /*
2428 * Begin non-standard override functions.
2429 */
2430
2431 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2432 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2433 void JEMALLOC_NOTHROW *
2434 JEMALLOC_ATTR(malloc)
2435 je_memalign(size_t alignment, size_t size) {
2436 void *ret;
2437 static_opts_t sopts;
2438 dynamic_opts_t dopts;
2439
	LOG("core.memalign.entry", "alignment: %zu, size: %zu", alignment,
	    size);
2442
2443 static_opts_init(&sopts);
2444 dynamic_opts_init(&dopts);
2445
2446 sopts.bump_empty_alloc = true;
2447 sopts.min_alignment = 1;
2448 sopts.oom_string =
2449 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2450 sopts.invalid_alignment_string =
2451 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2452 sopts.null_out_result_on_error = true;
2453
2454 dopts.result = &ret;
2455 dopts.num_items = 1;
2456 dopts.item_size = size;
2457 dopts.alignment = alignment;
2458
2459 imalloc(&sopts, &dopts);
2460
2461 LOG("core.memalign.exit", "result: %p", ret);
2462 return ret;
2463 }
2464 #endif
2465
2466 #ifdef JEMALLOC_OVERRIDE_VALLOC
2467 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2468 void JEMALLOC_NOTHROW *
2469 JEMALLOC_ATTR(malloc)
2470 je_valloc(size_t size) {
2471 void *ret;
2472
2473 static_opts_t sopts;
2474 dynamic_opts_t dopts;
2475
	LOG("core.valloc.entry", "size: %zu", size);
2477
2478 static_opts_init(&sopts);
2479 dynamic_opts_init(&dopts);
2480
2481 sopts.bump_empty_alloc = true;
2482 sopts.null_out_result_on_error = true;
2483 sopts.min_alignment = PAGE;
2484 sopts.oom_string =
2485 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2486 sopts.invalid_alignment_string =
2487 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2488
2489 dopts.result = &ret;
2490 dopts.num_items = 1;
2491 dopts.item_size = size;
2492 dopts.alignment = PAGE;
2493
2494 imalloc(&sopts, &dopts);
2495
	LOG("core.valloc.exit", "result: %p", ret);
2497 return ret;
2498 }
2499 #endif
2500
2501 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2502 /*
2503 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2504 * to inconsistently reference libc's malloc(3)-compatible functions
2505 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2506 *
2507 * These definitions interpose hooks in glibc. The functions are actually
2508 * passed an extra argument for the caller return address, which will be
2509 * ignored.
2510 */
2511 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2512 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2513 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2514 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2515 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2516 je_memalign;
2517 # endif
2518
2519 # ifdef CPU_COUNT
2520 /*
2521 * To enable static linking with glibc, the libc specific malloc interface must
2522 * be implemented also, so none of glibc's malloc.o functions are added to the
2523 * link.
2524 */
2525 # define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
2526 /* To force macro expansion of je_ prefix before stringification. */
2527 # define PREALIAS(je_fn) ALIAS(je_fn)
2528 # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2529 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2530 # endif
2531 # ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2532 void __libc_free(void* ptr) PREALIAS(je_free);
2533 # endif
2534 # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2535 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2536 # endif
2537 # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2538 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2539 # endif
2540 # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2541 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2542 # endif
2543 # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2544 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2545 # endif
2546 # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2547 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2548 # endif
2549 # undef PREALIAS
2550 # undef ALIAS
2551 # endif
2552 #endif
2553
2554 /*
2555 * End non-standard override functions.
2556 */
2557 /******************************************************************************/
2558 /*
2559 * Begin non-standard functions.
2560 */
2561
2562 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2563 void JEMALLOC_NOTHROW *
2564 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2565 je_mallocx(size_t size, int flags) {
2566 void *ret;
2567 static_opts_t sopts;
2568 dynamic_opts_t dopts;
2569
2570 LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2571
2572 static_opts_init(&sopts);
2573 dynamic_opts_init(&dopts);
2574
2575 sopts.assert_nonempty_alloc = true;
2576 sopts.null_out_result_on_error = true;
2577 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2578
2579 dopts.result = &ret;
2580 dopts.num_items = 1;
2581 dopts.item_size = size;
2582 if (unlikely(flags != 0)) {
2583 if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2584 dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2585 }
2586
2587 dopts.zero = MALLOCX_ZERO_GET(flags);
2588
2589 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2590 if ((flags & MALLOCX_TCACHE_MASK)
2591 == MALLOCX_TCACHE_NONE) {
2592 dopts.tcache_ind = TCACHE_IND_NONE;
2593 } else {
2594 dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2595 }
2596 } else {
2597 dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2598 }
2599
		if ((flags & MALLOCX_ARENA_MASK) != 0) {
			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
		}
2602 }
2603
2604 imalloc(&sopts, &dopts);
2605
2606 LOG("core.mallocx.exit", "result: %p", ret);
2607 return ret;
2608 }
2609
2610 static void *
2611 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2612 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2613 prof_tctx_t *tctx) {
2614 void *p;
2615
2616 if (tctx == NULL) {
2617 return NULL;
2618 }
2619 if (usize <= SMALL_MAXCLASS) {
2620 p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2621 alignment, zero, tcache, arena);
2622 if (p == NULL) {
2623 return NULL;
2624 }
2625 arena_prof_promote(tsdn, p, usize);
2626 } else {
2627 p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2628 tcache, arena);
2629 }
2630
2631 return p;
2632 }
2633
2634 JEMALLOC_ALWAYS_INLINE void *
2635 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2636 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2637 arena_t *arena, alloc_ctx_t *alloc_ctx) {
2638 void *p;
2639 bool prof_active;
2640 prof_tctx_t *old_tctx, *tctx;
2641
2642 prof_active = prof_active_get_unlocked();
2643 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2644 tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2645 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2646 p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2647 *usize, alignment, zero, tcache, arena, tctx);
2648 } else {
2649 p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2650 zero, tcache, arena);
2651 }
2652 if (unlikely(p == NULL)) {
2653 prof_alloc_rollback(tsd, tctx, false);
2654 return NULL;
2655 }
2656
2657 if (p == old_ptr && alignment != 0) {
2658 /*
2659 * The allocation did not move, so it is possible that the size
2660 * class is smaller than would guarantee the requested
2661 * alignment, and that the alignment constraint was
2662 * serendipitously satisfied. Additionally, old_usize may not
2663 * be the same as the current usize because of in-place large
2664 * reallocation. Therefore, query the actual value of usize.
2665 */
2666 *usize = isalloc(tsd_tsdn(tsd), p);
2667 }
2668 prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2669 old_usize, old_tctx);
2670
2671 return p;
2672 }
2673
2674 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2675 void JEMALLOC_NOTHROW *
2676 JEMALLOC_ALLOC_SIZE(2)
2677 je_rallocx(void *ptr, size_t size, int flags) {
2678 void *p;
2679 tsd_t *tsd;
2680 size_t usize;
2681 size_t old_usize;
2682 size_t alignment = MALLOCX_ALIGN_GET(flags);
2683 bool zero = flags & MALLOCX_ZERO;
2684 arena_t *arena;
2685 tcache_t *tcache;
2686
2687 LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
	    size, flags);
2690
2691 assert(ptr != NULL);
2692 assert(size != 0);
2693 assert(malloc_initialized() || IS_INITIALIZER);
2694 tsd = tsd_fetch();
2695 check_entry_exit_locking(tsd_tsdn(tsd));
2696
2697 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2698 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2699 arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2700 if (unlikely(arena == NULL)) {
2701 goto label_oom;
2702 }
2703 } else {
2704 arena = NULL;
2705 }
2706
2707 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2708 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2709 tcache = NULL;
2710 } else {
2711 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2712 }
2713 } else {
2714 tcache = tcache_get(tsd);
2715 }
2716
2717 alloc_ctx_t alloc_ctx;
2718 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2719 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2720 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2721 assert(alloc_ctx.szind != NSIZES);
2722 old_usize = sz_index2size(alloc_ctx.szind);
2723 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2724 if (config_prof && opt_prof) {
2725 usize = (alignment == 0) ?
2726 sz_s2u(size) : sz_sa2u(size, alignment);
2727 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2728 goto label_oom;
2729 }
2730 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2731 zero, tcache, arena, &alloc_ctx);
2732 if (unlikely(p == NULL)) {
2733 goto label_oom;
2734 }
2735 } else {
2736 p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2737 zero, tcache, arena);
2738 if (unlikely(p == NULL)) {
2739 goto label_oom;
2740 }
2741 if (config_stats) {
2742 usize = isalloc(tsd_tsdn(tsd), p);
2743 }
2744 }
2745 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2746
2747 if (config_stats) {
2748 *tsd_thread_allocatedp_get(tsd) += usize;
2749 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2750 }
2751 UTRACE(ptr, size, p);
2752 check_entry_exit_locking(tsd_tsdn(tsd));
2753
2754 LOG("core.rallocx.exit", "result: %p", p);
2755 return p;
2756 label_oom:
2757 if (config_xmalloc && unlikely(opt_xmalloc)) {
2758 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2759 abort();
2760 }
2761 UTRACE(ptr, size, 0);
2762 check_entry_exit_locking(tsd_tsdn(tsd));
2763
2764 LOG("core.rallocx.exit", "result: %p", NULL);
2765 return NULL;
2766 }
2767
2768 JEMALLOC_ALWAYS_INLINE size_t
2769 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2770 size_t extra, size_t alignment, bool zero) {
2771 size_t usize;
2772
2773 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2774 return old_usize;
2775 }
2776 usize = isalloc(tsdn, ptr);
2777
2778 return usize;
2779 }
2780
2781 static size_t
2782 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2783 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2784 size_t usize;
2785
2786 if (tctx == NULL) {
2787 return old_usize;
2788 }
2789 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2790 zero);
2791
2792 return usize;
2793 }
2794
2795 JEMALLOC_ALWAYS_INLINE size_t
2796 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2797 size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2798 size_t usize_max, usize;
2799 bool prof_active;
2800 prof_tctx_t *old_tctx, *tctx;
2801
2802 prof_active = prof_active_get_unlocked();
2803 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2804 /*
2805 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2806 * Therefore, compute its maximum possible value and use that in
2807 * prof_alloc_prep() to decide whether to capture a backtrace.
2808 * prof_realloc() will use the actual usize to decide whether to sample.
2809 */
2810 if (alignment == 0) {
2811 usize_max = sz_s2u(size+extra);
2812 assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2813 } else {
2814 usize_max = sz_sa2u(size+extra, alignment);
2815 if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2816 /*
2817 * usize_max is out of range, and chances are that
2818 * allocation will fail, but use the maximum possible
2819 * value and carry on with prof_alloc_prep(), just in
2820 * case allocation succeeds.
2821 */
2822 usize_max = LARGE_MAXCLASS;
2823 }
2824 }
2825 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2826
2827 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2828 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2829 size, extra, alignment, zero, tctx);
2830 } else {
2831 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2832 extra, alignment, zero);
2833 }
2834 if (usize == old_usize) {
2835 prof_alloc_rollback(tsd, tctx, false);
2836 return usize;
2837 }
2838 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2839 old_tctx);
2840
2841 return usize;
2842 }
2843
2844 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2845 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2846 tsd_t *tsd;
2847 size_t usize, old_usize;
2848 size_t alignment = MALLOCX_ALIGN_GET(flags);
2849 bool zero = flags & MALLOCX_ZERO;
2850
2851 LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2852 "flags: %d", ptr, size, extra, flags);
2853
2854 assert(ptr != NULL);
2855 assert(size != 0);
2856 assert(SIZE_T_MAX - size >= extra);
2857 assert(malloc_initialized() || IS_INITIALIZER);
2858 tsd = tsd_fetch();
2859 check_entry_exit_locking(tsd_tsdn(tsd));
2860
2861 alloc_ctx_t alloc_ctx;
2862 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2863 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2864 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2865 assert(alloc_ctx.szind != NSIZES);
2866 old_usize = sz_index2size(alloc_ctx.szind);
2867 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2868 /*
2869 * The API explicitly absolves itself of protecting against (size +
2870 * extra) numerical overflow, but we may need to clamp extra to avoid
2871 * exceeding LARGE_MAXCLASS.
2872 *
2873 * Ordinarily, size limit checking is handled deeper down, but here we
2874 * have to check as part of (size + extra) clamping, since we need the
2875 * clamped value in the above helper functions.
2876 */
2877 if (unlikely(size > LARGE_MAXCLASS)) {
2878 usize = old_usize;
2879 goto label_not_resized;
2880 }
2881 if (unlikely(LARGE_MAXCLASS - size < extra)) {
2882 extra = LARGE_MAXCLASS - size;
2883 }
2884
2885 if (config_prof && opt_prof) {
2886 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2887 alignment, zero, &alloc_ctx);
2888 } else {
2889 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2890 extra, alignment, zero);
2891 }
2892 if (unlikely(usize == old_usize)) {
2893 goto label_not_resized;
2894 }
2895
2896 if (config_stats) {
2897 *tsd_thread_allocatedp_get(tsd) += usize;
2898 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2899 }
2900 label_not_resized:
2901 UTRACE(ptr, size, ptr);
2902 check_entry_exit_locking(tsd_tsdn(tsd));
2903
2904 LOG("core.xallocx.exit", "result: %zu", usize);
2905 return usize;
2906 }
2907
2908 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2909 JEMALLOC_ATTR(pure)
2910 je_sallocx(const void *ptr, UNUSED int flags) {
2911 size_t usize;
2912 tsdn_t *tsdn;
2913
2914 LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2915
2916 assert(malloc_initialized() || IS_INITIALIZER);
2917 assert(ptr != NULL);
2918
2919 tsdn = tsdn_fetch();
2920 check_entry_exit_locking(tsdn);
2921
2922 if (config_debug || force_ivsalloc) {
2923 usize = ivsalloc(tsdn, ptr);
2924 assert(force_ivsalloc || usize != 0);
2925 } else {
2926 usize = isalloc(tsdn, ptr);
2927 }
2928
2929 check_entry_exit_locking(tsdn);
2930
2931 LOG("core.sallocx.exit", "result: %zu", usize);
2932 return usize;
2933 }
2934
2935 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2936 je_dallocx(void *ptr, int flags) {
2937 LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2938
2939 assert(ptr != NULL);
2940 assert(malloc_initialized() || IS_INITIALIZER);
2941
2942 tsd_t *tsd = tsd_fetch();
2943 bool fast = tsd_fast(tsd);
2944 check_entry_exit_locking(tsd_tsdn(tsd));
2945
2946 tcache_t *tcache;
2947 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2948 /* Not allowed to be reentrant and specify a custom tcache. */
2949 assert(tsd_reentrancy_level_get(tsd) == 0);
2950 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2951 tcache = NULL;
2952 } else {
2953 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2954 }
2955 } else {
2956 if (likely(fast)) {
2957 tcache = tsd_tcachep_get(tsd);
2958 assert(tcache == tcache_get(tsd));
2959 } else {
2960 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2961 tcache = tcache_get(tsd);
2962 } else {
2963 tcache = NULL;
2964 }
2965 }
2966 }
2967
2968 UTRACE(ptr, 0, 0);
2969 if (likely(fast)) {
2970 tsd_assert_fast(tsd);
2971 ifree(tsd, ptr, tcache, false);
2972 } else {
2973 ifree(tsd, ptr, tcache, true);
2974 }
2975 check_entry_exit_locking(tsd_tsdn(tsd));
2976
2977 LOG("core.dallocx.exit", "");
2978 }
2979
2980 JEMALLOC_ALWAYS_INLINE size_t
2981 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2982 check_entry_exit_locking(tsdn);
2983
2984 size_t usize;
2985 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2986 usize = sz_s2u(size);
2987 } else {
2988 usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2989 }
2990 check_entry_exit_locking(tsdn);
2991 return usize;
2992 }
2993
2994 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2995 je_sdallocx(void *ptr, size_t size, int flags) {
2996 assert(ptr != NULL);
2997 assert(malloc_initialized() || IS_INITIALIZER);
2998
2999 LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3000 size, flags);
3001
3002 tsd_t *tsd = tsd_fetch();
3003 bool fast = tsd_fast(tsd);
3004 size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
3005 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
3006 check_entry_exit_locking(tsd_tsdn(tsd));
3007
3008 tcache_t *tcache;
3009 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
3010 /* Not allowed to be reentrant and specify a custom tcache. */
3011 assert(tsd_reentrancy_level_get(tsd) == 0);
3012 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3013 tcache = NULL;
3014 } else {
3015 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3016 }
3017 } else {
3018 if (likely(fast)) {
3019 tcache = tsd_tcachep_get(tsd);
3020 assert(tcache == tcache_get(tsd));
3021 } else {
3022 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3023 tcache = tcache_get(tsd);
3024 } else {
3025 tcache = NULL;
3026 }
3027 }
3028 }
3029
3030 UTRACE(ptr, 0, 0);
3031 if (likely(fast)) {
3032 tsd_assert_fast(tsd);
3033 isfree(tsd, ptr, usize, tcache, false);
3034 } else {
3035 isfree(tsd, ptr, usize, tcache, true);
3036 }
3037 check_entry_exit_locking(tsd_tsdn(tsd));
3038
3039 LOG("core.sdallocx.exit", "");
3040 }
3041
3042 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3043 JEMALLOC_ATTR(pure)
3044 je_nallocx(size_t size, int flags) {
3045 size_t usize;
3046 tsdn_t *tsdn;
3047
3048 assert(size != 0);
3049
3050 if (unlikely(malloc_init())) {
3051 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3052 return 0;
3053 }
3054
3055 tsdn = tsdn_fetch();
3056 check_entry_exit_locking(tsdn);
3057
3058 usize = inallocx(tsdn, size, flags);
3059 if (unlikely(usize > LARGE_MAXCLASS)) {
3060 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3061 return 0;
3062 }
3063
3064 check_entry_exit_locking(tsdn);
3065 LOG("core.nallocx.exit", "result: %zu", usize);
3066 return usize;
3067 }
3068
3069 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3070 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3071 size_t newlen) {
3072 int ret;
3073 tsd_t *tsd;
3074
3075 LOG("core.mallctl.entry", "name: %s", name);
3076
3077 if (unlikely(malloc_init())) {
3078 LOG("core.mallctl.exit", "result: %d", EAGAIN);
3079 return EAGAIN;
3080 }
3081
3082 tsd = tsd_fetch();
3083 check_entry_exit_locking(tsd_tsdn(tsd));
3084 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3085 check_entry_exit_locking(tsd_tsdn(tsd));
3086
3087 LOG("core.mallctl.exit", "result: %d", ret);
3088 return ret;
3089 }
3090
3091 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3092 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3093 int ret;
3094
3095 LOG("core.mallctlnametomib.entry", "name: %s", name);
3096
3097 if (unlikely(malloc_init())) {
3098 LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3099 return EAGAIN;
3100 }
3101
3102 tsd_t *tsd = tsd_fetch();
3103 check_entry_exit_locking(tsd_tsdn(tsd));
3104 ret = ctl_nametomib(tsd, name, mibp, miblenp);
3105 check_entry_exit_locking(tsd_tsdn(tsd));
3106
3107 LOG("core.mallctlnametomib.exit", "result: %d", ret);
3108 return ret;
3109 }
3110
3111 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3112 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3113 void *newp, size_t newlen) {
3114 int ret;
3115 tsd_t *tsd;
3116
3117 LOG("core.mallctlbymib.entry", "");
3118
3119 if (unlikely(malloc_init())) {
3120 LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3121 return EAGAIN;
3122 }
3123
3124 tsd = tsd_fetch();
3125 check_entry_exit_locking(tsd_tsdn(tsd));
3126 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3127 check_entry_exit_locking(tsd_tsdn(tsd));
3128 LOG("core.mallctlbymib.exit", "result: %d", ret);
3129 return ret;
3130 }
3131
3132 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3133 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3134 const char *opts) {
3135 tsdn_t *tsdn;
3136
3137 LOG("core.malloc_stats_print.entry", "");
3138
3139 tsdn = tsdn_fetch();
3140 check_entry_exit_locking(tsdn);
3141 stats_print(write_cb, cbopaque, opts);
3142 check_entry_exit_locking(tsdn);
3143 LOG("core.malloc_stats_print.exit", "");
3144 }
3145
3146 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3147 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3148 size_t ret;
3149 tsdn_t *tsdn;
3150
3151 LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3152
3153 assert(malloc_initialized() || IS_INITIALIZER);
3154
3155 tsdn = tsdn_fetch();
3156 check_entry_exit_locking(tsdn);
3157
3158 if (unlikely(ptr == NULL)) {
3159 ret = 0;
3160 } else {
3161 if (config_debug || force_ivsalloc) {
3162 ret = ivsalloc(tsdn, ptr);
3163 assert(force_ivsalloc || ret != 0);
3164 } else {
3165 ret = isalloc(tsdn, ptr);
3166 }
3167 }
3168
3169 check_entry_exit_locking(tsdn);
3170 LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3171 return ret;
3172 }
3173
3174 /*
3175 * End non-standard functions.
3176 */
3177 /******************************************************************************/
3178 /*
3179 * The following functions are used by threading libraries for protection of
3180 * malloc during fork().
3181 */
3182
3183 /*
3184 * If an application creates a thread before doing any allocation in the main
3185 * thread, then calls fork(2) in the main thread followed by memory allocation
3186 * in the child process, a race can occur that results in deadlock within the
3187 * child: the main thread may have forked while the created thread had
3188 * partially initialized the allocator. Ordinarily jemalloc prevents
3189 * fork/malloc races via the following functions it registers during
3190 * initialization using pthread_atfork(), but of course that does no good if
3191 * the allocator isn't fully initialized at fork time. The following library
3192 * constructor is a partial solution to this problem. It may still be possible
3193 * to trigger the deadlock described above, but doing so would involve forking
3194 * via a library constructor that runs before jemalloc's runs.
3195 */
3196 #ifndef JEMALLOC_JET
3197 JEMALLOC_ATTR(constructor)
3198 static void
3199 jemalloc_constructor(void) {
3200 malloc_init();
3201 }
3202 #endif
3203
3204 #ifndef JEMALLOC_MUTEX_INIT_CB
3205 void
3206 jemalloc_prefork(void)
3207 #else
3208 JEMALLOC_EXPORT void
3209 _malloc_prefork(void)
3210 #endif
3211 {
3212 tsd_t *tsd;
3213 unsigned i, j, narenas;
3214 arena_t *arena;
3215
3216 #ifdef JEMALLOC_MUTEX_INIT_CB
3217 if (!malloc_initialized()) {
3218 return;
3219 }
3220 #endif
3221 assert(malloc_initialized());
3222
3223 tsd = tsd_fetch();
3224
3225 narenas = narenas_total_get();
3226
3227 witness_prefork(tsd_witness_tsdp_get(tsd));
3228 /* Acquire all mutexes in a safe order. */
3229 ctl_prefork(tsd_tsdn(tsd));
3230 tcache_prefork(tsd_tsdn(tsd));
3231 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3232 if (have_background_thread) {
3233 background_thread_prefork0(tsd_tsdn(tsd));
3234 }
3235 prof_prefork0(tsd_tsdn(tsd));
3236 if (have_background_thread) {
3237 background_thread_prefork1(tsd_tsdn(tsd));
3238 }
3239 /* Break arena prefork into stages to preserve lock order. */
3240 for (i = 0; i < 8; i++) {
3241 for (j = 0; j < narenas; j++) {
3242 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3243 NULL) {
3244 switch (i) {
3245 case 0:
3246 arena_prefork0(tsd_tsdn(tsd), arena);
3247 break;
3248 case 1:
3249 arena_prefork1(tsd_tsdn(tsd), arena);
3250 break;
3251 case 2:
3252 arena_prefork2(tsd_tsdn(tsd), arena);
3253 break;
3254 case 3:
3255 arena_prefork3(tsd_tsdn(tsd), arena);
3256 break;
3257 case 4:
3258 arena_prefork4(tsd_tsdn(tsd), arena);
3259 break;
3260 case 5:
3261 arena_prefork5(tsd_tsdn(tsd), arena);
3262 break;
3263 case 6:
3264 arena_prefork6(tsd_tsdn(tsd), arena);
3265 break;
3266 case 7:
3267 arena_prefork7(tsd_tsdn(tsd), arena);
3268 break;
3269 default: not_reached();
3270 }
3271 }
3272 }
3273 }
3274 prof_prefork1(tsd_tsdn(tsd));
3275 }
3276
3277 #ifndef JEMALLOC_MUTEX_INIT_CB
3278 void
3279 jemalloc_postfork_parent(void)
3280 #else
3281 JEMALLOC_EXPORT void
3282 _malloc_postfork(void)
3283 #endif
3284 {
3285 tsd_t *tsd;
3286 unsigned i, narenas;
3287
3288 #ifdef JEMALLOC_MUTEX_INIT_CB
3289 if (!malloc_initialized()) {
3290 return;
3291 }
3292 #endif
3293 assert(malloc_initialized());
3294
3295 tsd = tsd_fetch();
3296
3297 witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3298 /* Release all mutexes, now that fork() has completed. */
3299 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3300 arena_t *arena;
3301
3302 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3303 arena_postfork_parent(tsd_tsdn(tsd), arena);
3304 }
3305 }
3306 prof_postfork_parent(tsd_tsdn(tsd));
3307 if (have_background_thread) {
3308 background_thread_postfork_parent(tsd_tsdn(tsd));
3309 }
3310 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3311 tcache_postfork_parent(tsd_tsdn(tsd));
3312 ctl_postfork_parent(tsd_tsdn(tsd));
3313 }
3314
3315 void
3316 jemalloc_postfork_child(void) {
3317 tsd_t *tsd;
3318 unsigned i, narenas;
3319
3320 assert(malloc_initialized());
3321
3322 tsd = tsd_fetch();
3323
3324 witness_postfork_child(tsd_witness_tsdp_get(tsd));
3325 /* Release all mutexes, now that fork() has completed. */
3326 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3327 arena_t *arena;
3328
3329 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3330 arena_postfork_child(tsd_tsdn(tsd), arena);
3331 }
3332 }
3333 prof_postfork_child(tsd_tsdn(tsd));
3334 if (have_background_thread) {
3335 background_thread_postfork_child(tsd_tsdn(tsd));
3336 }
3337 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3338 tcache_postfork_child(tsd_tsdn(tsd));
3339 ctl_postfork_child(tsd_tsdn(tsd));
3340 }
3341
3342 /******************************************************************************/
3343
3344 #if defined(__BIONIC__) && !defined(JEMALLOC_JET)
3345 #include "android_je_iterate.c"
3346 #include "android_je_mallinfo.c"
3347 #endif
3348