#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas_total;
unsigned narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif
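
/*
 * Example (hypothetical values): with opt_utrace enabled, a malloc(4096)
 * call that returns p is recorded via UTRACE(0, 4096, p) as the utrace(2)
 * record {p = NULL, s = 4096, r = p}, and the matching free(p) is recorded
 * via UTRACE(p, 0, 0).
 */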

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path. Instead, punt
	 * by using arenas[0]. In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
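/*
 * Selection policy, by way of a hypothetical example: with narenas_auto == 4
 * and per-arena thread counts {3, 1, -, -} (arenas 2 and 3 uninitialized),
 * the least-loaded initialized arena is index 1, but since its load is
 * nonzero and an uninitialized slot exists, arena 2 is created and used
 * instead. An arena with zero assigned threads is used outright, and only
 * once every slot is initialized does the least-loaded arena win.
 */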
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned narenas, i;

		/*
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption. The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (malloc_initialized == false && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

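/*
 * Parse the next key:value pair from a conf string. For example (input of
 * our own choosing), given *opts_p == "abort:true,narenas:8", the first call
 * yields k = "abort" (klen 5) and v = "true" (vlen 4) and advances *opts_p
 * past the comma; the second call yields the "narenas"/"8" pair and leaves
 * *opts_p at the terminating '\0'.
 */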
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

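/*
 * Options are read, in order, from: (0) the compiled-in je_malloc_conf
 * string, (1) the name of the /etc/malloc.conf symbolic link, and (2) the
 * MALLOC_CONF environment variable (on Android, only the compiled-in source
 * is consulted). Because each source is processed in full and later
 * assignments overwrite earlier ones, MALLOC_CONF effectively takes the
 * highest precedence.
 */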
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options. The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && in_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && in_valgrind)
			opt_tcache = false;
	}

#if defined(__ANDROID__)
	/* Android only supports compiled options. */
	for (i = 0; i < 1; i++) {
#else
	for (i = 0; i < 3; i++) {
#endif
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_MATCH(n)							\
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define	CONF_HANDLE_BOOL(o, n, cont)					\
			if (CONF_MATCH(n)) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				if (cont)				\
					continue;			\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			if (CONF_MATCH(n)) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (min != 0 && um < min)	\
						o = min;		\
					else if (um > max)		\
						o = max;		\
					else				\
						o = um;			\
				} else {				\
					if ((min != 0 && um < min) ||	\
					    um > max) {			\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = um;			\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (CONF_MATCH(n)) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (CONF_MATCH(n)) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}
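			/*
			 * Example: the pair "narenas:8" is matched by the
			 * CONF_HANDLE_SIZE_T(opt_narenas, "narenas", ...)
			 * handler below, which parses "8" with
			 * malloc_strtoumax() and range-checks it before
			 * assigning opt_narenas.
			 */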

			CONF_HANDLE_BOOL(opt_abort, "abort", true)
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones. In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk", true)
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
				CONF_HANDLE_BOOL(opt_zero, "zero", true)
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache",
				    !config_valgrind || !in_valgrind)
				if (CONF_MATCH("tcache")) {
					assert(config_valgrind && in_valgrind);
					if (opt_tcache) {
						opt_tcache = false;
						malloc_conf_error(
						    "tcache cannot be enabled "
						    "while running inside Valgrind",
						    k, klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof", true)
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
				    true)
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
				    true)
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
				    true)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here. The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	malloc_mutex_unlock(&init_lock);
	/**********************************************************************/
	/* Recursive allocation may follow. */

	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
	/* LinuxThreads's pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	/* Done recursively allocating. */
	/**********************************************************************/
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated. In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array. In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);

	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = imalloc(SMALL_MAXCLASS+1);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = imalloc(usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(size_t usize)
{
	void *p;
	prof_thr_cnt_t *cnt;

	PROF_ALLOC_PREP(usize, cnt);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imalloc_prof_sample(usize, cnt);
	else
		p = imalloc(usize);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, size_t *usize)
{

	if (malloc_init())
		return (NULL);

	if (config_prof && opt_prof) {
		*usize = s2u(size);
		return (imalloc_prof(*usize));
	}

	if (config_stats || (config_valgrind && in_valgrind))
		*usize = s2u(size);
	return (imalloc(size));
}

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	ret = imalloc_body(size, &usize);
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

static void *
imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
		p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
		    false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = ipalloc(usize, alignment, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imemalign_prof_sample(alignment, usize, cnt);
	else
		p = ipalloc(usize, alignment, false);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (malloc_init()) {
		result = NULL;
		goto label_oom;
	} else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			goto label_oom;
		}

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			PROF_ALLOC_PREP(usize, cnt);
			result = imemalign_prof(alignment, usize, cnt);
		} else
			result = ipalloc(usize, alignment, false);
		if (result == NULL)
			goto label_oom;
	}

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, result);
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	goto label_return;
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

static void *
icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = icalloc(SMALL_MAXCLASS+1);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = icalloc(usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = icalloc_prof_sample(usize, cnt);
	else
		p = icalloc(usize);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
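	 * For example, with a 64-bit size_t, if both num and size are below
	 * 2^32, their product is below 2^64 and cannot wrap, so the division
	 * check is needed only when (num | size) has bits set in the upper
	 * half.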
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = s2u(num_size);
		PROF_ALLOC_PREP(usize, cnt);
		ret = icalloc_prof(usize, cnt);
	} else {
		if (config_stats || (config_valgrind && in_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

static void *
irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = iralloc(oldptr, usize, 0, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irealloc_prof_sample(oldptr, usize, cnt);
	else
		p = iralloc(oldptr, usize, 0, 0, false);
	if (p == NULL)
		return (NULL);
	prof_realloc(p, usize, cnt, old_usize, old_ctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(void *ptr)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && in_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			ifree(ptr);
			return (NULL);
		}
		size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if ((config_prof && opt_prof) || config_stats ||
		    (config_valgrind && in_valgrind))
			old_usize = isalloc(ptr, config_prof);
		if (config_valgrind && in_valgrind)
			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			usize = s2u(size);
			PROF_ALLOC_PREP(usize, cnt);
			ret = irealloc_prof(ptr, old_usize, usize, cnt);
		} else {
			if (config_stats || (config_valgrind && in_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		ret = imalloc_body(size, &usize);
	}

	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
	    old_rzsize, true, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL)
		ifree(ptr);
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)
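
/*
 * For instance, is_malloc(je_malloc) expands to is_malloc_(je_malloc) and
 * then pastes to malloc_is_je_malloc, an undefined identifier that evaluates
 * to 0 in the #if below; but when jemalloc_defs.h defines je_malloc as
 * malloc, the expansion becomes malloc_is_malloc, defined above as 1.
 */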

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C void *
imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloct(usize, alignment, zero, try_tcache, arena));
	else if (zero)
		return (icalloct(usize, try_tcache, arena));
	else
		return (imalloct(usize, try_tcache, arena));
}

static void *
imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		size_t usize_promoted = (alignment == 0) ?
		    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
		assert(usize_promoted != 0);
		p = imallocx(usize_promoted, alignment, zero, try_tcache,
		    arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U) {
		p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
		    arena, cnt);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

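/*
 * Decoding of the MALLOCX_* flags, as used below: the low bits hold the
 * requested alignment as a base-2 logarithm (alignment = 1 << (flags &
 * MALLOCX_LG_ALIGN_MASK), where a requested lg of 0 decodes to alignment 0,
 * i.e. no constraint), MALLOCX_ZERO requests zeroed memory, and bits 8 and
 * up hold an explicit arena index biased by 1, so ((flags >> 8) - 1) ==
 * UINT_MAX means no arena was specified.
 */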
void *
je_mallocx(size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;
	bool try_tcache;

	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	if (arena_ind != UINT_MAX) {
		arena = arenas[arena_ind];
		try_tcache = false;
	} else {
		arena = NULL;
		try_tcache = true;
	}

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	assert(usize != 0);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(usize, cnt);
		p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
		    cnt);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	if (p == NULL)
		goto label_oom;

	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
		abort();
	}
	UTRACE(0, size, 0);
	return (NULL);
}

static void *
irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
    size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
	else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}
	if (p == NULL)
		return (NULL);

	if (p == oldptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied. Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation. Therefore, query the actual value of usize.
		 */
		*usize = isalloc(p, config_prof);
	}
	prof_realloc(p, *usize, cnt, old_usize, old_ctx);

	return (p);
}

void *
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = false;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache_dalloc = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
		arena = arenas[arena_ind];
	} else {
		try_tcache_alloc = true;
		try_tcache_dalloc = true;
		arena = NULL;
	}

	if ((config_prof && opt_prof) || config_stats ||
	    (config_valgrind && in_valgrind))
		old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && in_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		assert(usize != 0);
		PROF_ALLOC_PREP(usize, cnt);
		p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
		if (p == NULL)
			goto label_oom;
	} else {
		p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc, arena);
		if (p == NULL)
			goto label_oom;
		if (config_stats || (config_valgrind && in_valgrind))
			usize = isalloc(p, config_prof);
	}

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	return (NULL);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, bool zero, arena_t *arena)
{
	size_t usize;

	if (ixalloc(ptr, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;

	if (cnt == NULL)
		return (old_usize);
	/* Use minimum usize to determine whether promotion may happen. */
	if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
	    SMALL_MAXCLASS) {
		if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
		    alignment, zero))
			return (old_usize);
		usize = isalloc(ptr, config_prof);
		if (max_usize < PAGE)
			arena_prof_promoted(ptr, usize);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}

	return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(ptr);
	if ((uintptr_t)cnt != (uintptr_t)1U) {
		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
		    alignment, max_usize, zero, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	if (usize == old_usize)
		return (usize);
	prof_realloc(ptr, usize, cnt, old_usize, old_ctx);

	return (usize);
}

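/*
 * xallocx() resizes strictly in place: ixalloc() returns true when it cannot
 * grow or shrink the existing allocation, in which case the helpers above
 * hand back old_usize unchanged, so a return value equal to old_usize means
 * nothing happened.
 */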
size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX)
		arena = arenas[arena_ind];
	else
		arena = NULL;

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && in_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;
		/*
		 * usize isn't knowable before ixalloc() returns when extra is
		 * non-zero. Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace. prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		PROF_ALLOC_PREP(max_usize, cnt);
		usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
		    max_usize, zero, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	if (usize == old_usize)
		goto label_not_resized;

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	return (usize);
}

size_t
je_sallocx(const void *ptr, int flags)
{
	size_t usize;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		usize = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		usize = isalloc(ptr, config_prof);
	}

	return (usize);
}

void
je_dallocx(void *ptr, int flags)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
	} else
		try_tcache = true;

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && in_valgrind)
		rzsize = p2rz(ptr);
	iqalloct(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

size_t
je_nallocx(size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (0);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	assert(usize != 0);
	return (usize);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator. Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time. The following library
 * constructor is a partial solution to this problem. It may still be
 * possible to trigger the deadlock described above, but doing so would
 * involve forking via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
	huge_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(NULL, size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

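	/*
	 * Note: num * size is not checked for overflow here; these bootstrap
	 * paths are assumed to be called only with small sizes (see the
	 * comment above about their use for TLS allocation).
	 */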
	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk, ptr, false);
	else
		huge_dalloc(ptr);
}

/******************************************************************************/