#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) je_##n
# include "../jemalloc.h"
#endif
#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;
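
/*
 * The config_* constants above exist so that feature tests can be written as
 * ordinary branches rather than #ifdef blocks, e.g. (illustrative sketch;
 * update_counters() is a hypothetical stand-in, not a function in this tree):
 *
 *   if (config_stats) {
 *           update_counters();
 *   }
 *
 * When a feature is compiled out, the condition is a constant false and the
 * compiler eliminates the branch entirely, while both sides still get
 * type-checked.
 */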

#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#include "jemalloc/internal/ph.h"
#ifndef __PGI
#define RB_COMPACT
#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
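/*
 * As an illustrative sketch (foo is a hypothetical component, not a header in
 * this tree), a header that participates in these passes is laid out as:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s { ... };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   void foo_init(foo_t *foo);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE void foo_reset(foo_t *foo) { ... }
 *   #endif
 */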
/******************************************************************************/
#define JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

/* Page size index type. */
typedef unsigned pszind_t;

/* Size class index type. */
typedef unsigned szind_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define MALLOCX_ARENA_MASK ((int)~0xfffff)
#define MALLOCX_ARENA_MAX 0xffe
#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
#define MALLOCX_TCACHE_MAX 0xffd
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
    ((bool)(flags & MALLOCX_ZERO))

#define MALLOCX_TCACHE_GET(flags) \
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define MALLOCX_ARENA_GET(flags) \
    (((unsigned)(((unsigned)flags) >> 20)) - 1)
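
/*
 * Example round trip (the MALLOCX_ARENA()/MALLOCX_TCACHE()/MALLOCX_LG_ALIGN()
 * encoders live in the public jemalloc.h): for
 * flags = MALLOCX_ARENA(2) | MALLOCX_LG_ALIGN(4), MALLOCX_ARENA_GET(flags)
 * recovers 2 (the encoder stores arena+1 so that all-zero arena bits mean
 * "unspecified"), and MALLOCX_ALIGN_GET_SPECIFIED(flags) recovers
 * 1 << 4 == 16.  MALLOCX_TCACHE() similarly stores tc+2, which the -2 above
 * undoes.
 */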

/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
#  define LG_QUANTUM 4
# endif
# ifdef __ia64__
#  define LG_QUANTUM 4
# endif
# ifdef __alpha__
#  define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
#  define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#  define LG_QUANTUM 4
# endif
# ifdef __arm__
#  define LG_QUANTUM 3
# endif
# ifdef __aarch64__
#  define LG_QUANTUM 4
# endif
# ifdef __hppa__
#  define LG_QUANTUM 4
# endif
# ifdef __mips__
#  define LG_QUANTUM 3
# endif
# ifdef __or1k__
#  define LG_QUANTUM 3
# endif
# ifdef __powerpc__
#  define LG_QUANTUM 4
# endif
# ifdef __riscv__
#  define LG_QUANTUM 4
# endif
# ifdef __s390__
#  define LG_QUANTUM 4
# endif
# ifdef __SH4__
#  define LG_QUANTUM 4
# endif
# ifdef __tile__
#  define LG_QUANTUM 4
# endif
# ifdef __le32__
#  define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
#  error "Unknown minimum alignment for architecture; specify via --with-lg-quantum"
# endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
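
/*
 * Worked example: with LG_QUANTUM == 4, QUANTUM == 16 and QUANTUM_MASK == 0xf,
 * so QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32, while QUANTUM_CEILING(16)
 * stays 16.
 */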

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~PAGE_MASK))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & ((~(alignment)) + 1))
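
/*
 * Worked example: with alignment == 64, ALIGNMENT_ADDR2BASE(0x12345, 64) ==
 * (void *)0x12340, ALIGNMENT_ADDR2OFFSET(0x12345, 64) == 5, and
 * ALIGNMENT_CEILING(100, 64) == 128.  Note that ((~(alignment)) + 1) is
 * -alignment in two's complement, i.e. the mask that clears the low
 * lg(alignment) bits.
 */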

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
#  include <malloc.h>
#  define alloca _alloca
# else
#  ifdef JEMALLOC_HAS_ALLOCA_H
#   include <alloca.h>
#  else
#   include <stdlib.h>
#  endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
    type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
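
/*
 * Usage sketch (seen and nbins are hypothetical names):
 * VARIABLE_ARRAY(bool, seen, nbins) declares an array of nbins bools named
 * seen, either as a C99 VLA or via alloca() on pre-C99 compilers.  Either way
 * the storage has automatic lifetime, so it must not escape the declaring
 * function.
 */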

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#define JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;

extern bool in_valgrind;

/* Number of CPUs. */
extern unsigned ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t **arenas;

/*
 * pind2sz_tab encodes the same information as could be computed by
 * pind2sz_compute().
 */
extern size_t const pind2sz_tab[NPSIZES];
/*
 * index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by index2size_compute().
 */
extern size_t const index2size_tab[NSIZES];
/*
 * size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via size2index().
 */
extern uint8_t const size2index_tab[];

arena_t *a0get(void);
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void narenas_tdata_cleanup(tsd_t *tsd);
void arenas_tdata_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
pszind_t psz2ind(size_t psz);
size_t pind2sz_compute(pszind_t pind);
size_t pind2sz_lookup(pszind_t pind);
size_t pind2sz(pszind_t pind);
size_t psz2u(size_t psz);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
    bool refresh_if_missing);
arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE pszind_t
psz2ind(size_t psz)
{

    if (unlikely(psz > HUGE_MAXCLASS))
        return (NPSIZES);
    {
        pszind_t x = lg_floor((psz<<1)-1);
        pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
            (LG_SIZE_CLASS_GROUP + LG_PAGE);
        pszind_t grp = shift << LG_SIZE_CLASS_GROUP;

        pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
            LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZI(-1) << lg_delta;
        pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        pszind_t ind = grp + mod;
        return (ind);
    }
}
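
/*
 * Worked example, assuming LG_PAGE == 12 and LG_SIZE_CLASS_GROUP == 2: for
 * psz == 4096, x == lg_floor(8191) == 12, so shift == 0, grp == 0, lg_delta ==
 * LG_PAGE, and mod == 0, giving pszind 0; psz == 4097 lands in the next class
 * (pszind 1).
 */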

JEMALLOC_INLINE size_t
pind2sz_compute(pszind_t pind)
{

    {
        size_t grp = pind >> LG_SIZE_CLASS_GROUP;
        size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        size_t grp_size_mask = ~((!!grp)-1);
        size_t grp_size = ((ZU(1) << (LG_PAGE +
            (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

        size_t shift = (grp == 0) ? 1 : grp;
        size_t lg_delta = shift + (LG_PAGE-1);
        size_t mod_size = (mod+1) << lg_delta;

        size_t sz = grp_size + mod_size;
        return (sz);
    }
}

JEMALLOC_INLINE size_t
pind2sz_lookup(pszind_t pind)
{
    size_t ret = (size_t)pind2sz_tab[pind];
    assert(ret == pind2sz_compute(pind));
    return (ret);
}

JEMALLOC_INLINE size_t
pind2sz(pszind_t pind)
{

    assert(pind < NPSIZES);
    return (pind2sz_lookup(pind));
}

JEMALLOC_INLINE size_t
psz2u(size_t psz)
{

    if (unlikely(psz > HUGE_MAXCLASS))
        return (0);
    {
        size_t x = lg_floor((psz<<1)-1);
        size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
            LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
        size_t delta = ZU(1) << lg_delta;
        size_t delta_mask = delta - 1;
        size_t usize = (psz + delta_mask) & ~delta_mask;
        return (usize);
    }
}

JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{

    if (unlikely(size > HUGE_MAXCLASS))
        return (NSIZES);
#if (NTBINS != 0)
    if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
        szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
        szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
    }
#endif
    {
        szind_t x = lg_floor((size<<1)-1);
        szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
            x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
        szind_t grp = shift << LG_SIZE_CLASS_GROUP;

        szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
            ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZI(-1) << lg_delta;
        szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        szind_t index = NTBINS + grp + mod;
        return (index);
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{

    assert(size <= LOOKUP_MAXCLASS);
    {
        szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
        assert(ret == size2index_compute(size));
        return (ret);
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{

    assert(size > 0);
    if (likely(size <= LOOKUP_MAXCLASS))
        return (size2index_lookup(size));
    return (size2index_compute(size));
}
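
/*
 * Worked example, assuming LG_QUANTUM == 4, LG_SIZE_CLASS_GROUP == 2, and
 * NTBINS == 0: the smallest classes are 16, 32, 48, 64, ..., so
 * size2index(16) == 0, size2index(17) == size2index(32) == 1, and
 * size2index(48) == 2.  Debug builds assert each lookup result against
 * size2index_compute().
 */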

JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{

#if (NTBINS > 0)
    if (index < NTBINS)
        return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
    {
        size_t reduced_index = index - NTBINS;
        size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
        size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
            1);

        size_t grp_size_mask = ~((!!grp)-1);
        size_t grp_size = ((ZU(1) << (LG_QUANTUM +
            (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

        size_t shift = (grp == 0) ? 1 : grp;
        size_t lg_delta = shift + (LG_QUANTUM-1);
        size_t mod_size = (mod+1) << lg_delta;

        size_t usize = grp_size + mod_size;
        return (usize);
    }
}

JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
    size_t ret = (size_t)index2size_tab[index];
    assert(ret == index2size_compute(index));
    return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{

    assert(index < NSIZES);
    return (index2size_lookup(index));
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{

    if (unlikely(size > HUGE_MAXCLASS))
        return (0);
#if (NTBINS > 0)
    if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
        size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
        size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
            (ZU(1) << lg_ceil));
    }
#endif
    {
        size_t x = lg_floor((size<<1)-1);
        size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
            ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
        size_t delta = ZU(1) << lg_delta;
        size_t delta_mask = delta - 1;
        size_t usize = (size + delta_mask) & ~delta_mask;
        return (usize);
    }
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
    size_t ret = index2size_lookup(size2index_lookup(size));

    assert(ret == s2u_compute(size));
    return (ret);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

    assert(size > 0);
    if (likely(size <= LOOKUP_MAXCLASS))
        return (s2u_lookup(size));
    return (s2u_compute(size));
}
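
/*
 * Under the same assumptions as the size2index() example above, s2u(17) == 32
 * and s2u(100) == 112: each request is rounded up to the smallest size class
 * that can contain it.
 */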

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /* Try for a small size class. */
    if (size <= SMALL_MAXCLASS && alignment < PAGE) {
        /*
         * Round size up to the nearest multiple of alignment.
         *
         * This done, we can take advantage of the fact that for each
         * small size class, every object is aligned at the smallest
         * power of two that is non-zero in the base two representation
         * of the size.  For example:
         *
         *   Size |   Base 2 | Minimum alignment
         *   -----+----------+------------------
         *     96 |  1100000 |                32
         *    144 | 10100000 |                32
         *    192 | 11000000 |                64
         */
        usize = s2u(ALIGNMENT_CEILING(size, alignment));
        if (usize < LARGE_MINCLASS)
            return (usize);
    }

    /* Try for a large size class. */
    if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
        /*
         * We can't achieve subpage alignment, so round up alignment
         * to the minimum that can actually be supported.
         */
        alignment = PAGE_CEILING(alignment);

        /* Make sure result is a large size class. */
        usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         */
        if (usize + large_pad + alignment - PAGE <= arena_maxrun)
            return (usize);
    }

    /* Huge size class.  Beware of overflow. */

    if (unlikely(alignment > HUGE_MAXCLASS))
        return (0);

    /*
     * We can't achieve subchunk alignment, so round up alignment to the
     * minimum that can actually be supported.
     */
    alignment = CHUNK_CEILING(alignment);

    /* Make sure result is a huge size class. */
    if (size <= chunksize)
        usize = chunksize;
    else {
        usize = s2u(size);
        if (usize < size) {
            /* size_t overflow. */
            return (0);
        }
    }

    /*
     * Calculate the multi-chunk mapping that huge_palloc() would need in
     * order to guarantee the alignment.
     */
    if (usize + alignment - PAGE < usize) {
        /* size_t overflow. */
        return (0);
    }
    return (usize);
}
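
/*
 * Illustrative small-class case (assuming 4 KiB pages and the usual size
 * classes): sa2u(50, 64) rounds 50 up to ALIGNMENT_CEILING(50, 64) == 64;
 * every object of class 64 is 64-byte aligned, so the usable size is 64,
 * which is below LARGE_MINCLASS and is returned from the small-class path.
 */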

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
    arena_t *ret;

    if (arena != NULL)
        return (arena);

    ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
    if (unlikely(ret == NULL))
        ret = arena_choose_hard(tsd, internal);

    return (ret);
}

JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{

    return (arena_choose_impl(tsd, arena, false));
}

JEMALLOC_INLINE arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena)
{

    return (arena_choose_impl(tsd, arena, true));
}

JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
    arena_tdata_t *tdata;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

    if (unlikely(arenas_tdata == NULL)) {
        /* arenas_tdata hasn't been initialized yet. */
        return (arena_tdata_get_hard(tsd, ind));
    }
    if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
        /*
         * ind is invalid, the cache is stale (too small), or the
         * tdata has yet to be initialized.
         */
        return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
            NULL);
    }

    tdata = &arenas_tdata[ind];
    if (likely(tdata != NULL) || !refresh_if_missing)
        return (tdata);
    return (arena_tdata_get_hard(tsd, ind));
}

JEMALLOC_INLINE arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
{
    arena_t *ret;

    assert(ind <= MALLOCX_ARENA_MAX);

    ret = arenas[ind];
    if (unlikely(ret == NULL)) {
        ret = atomic_read_p((void *)&arenas[ind]);
        if (init_if_missing && unlikely(ret == NULL))
            ret = arena_init(tsdn, ind);
    }
    return (ret);
}

JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
    arena_tdata_t *tdata;

    tdata = arena_tdata_get(tsd, ind, true);
    if (unlikely(tdata == NULL))
        return (NULL);
    return (&tdata->decay_ticker);
}
#endif

#include "jemalloc/internal/bitmap.h"
/*
 * Include portions of arena.h interleaved with tcache.h in order to resolve
 * circular dependencies.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
    bool slow_path);
void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena);
void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(tsdn_t *tsdn, const void *ptr);
void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena);
void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero);
bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{

    assert(ptr != NULL);

    return (arena_aalloc(ptr));
}

/*
 * Typical usage:
 *   tsdn_t *tsdn = [...]
 *   void *ptr = [...]
 *   size_t sz = isalloc(tsdn, ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{

    assert(ptr != NULL);
    /* Demotion only makes sense if config_prof is true. */
    assert(config_prof || !demote);

    return (arena_salloc(tsdn, ptr, demote));
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena, bool slow_path)
{
    void *ret;

    assert(size != 0);
    assert(!is_metadata || tcache == NULL);
    assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);

    ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
    if (config_stats && is_metadata && likely(ret != NULL)) {
        arena_metadata_allocated_add(iaalloc(ret),
            isalloc(tsdn, ret, config_prof));
    }
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{

    return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
        false, NULL, slow_path));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment));
    assert(!is_metadata || tcache == NULL);
    assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);

    ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    if (config_stats && is_metadata && likely(ret != NULL)) {
        arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
            config_prof));
    }
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{

    return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

    return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
        tcache_get(tsd, true), false, NULL));
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
    extent_node_t *node;

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    node = chunk_lookup(ptr, false);
    if (node == NULL)
        return (0);
    /* Only arena chunks should be looked up via interior pointers. */
    assert(extent_node_addr_get(node) == ptr ||
        extent_node_achunk_get(node));

    return (isalloc(tsdn, ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
    size_t ret;

    if (usize <= SMALL_MAXCLASS) {
        szind_t binind = size2index(usize);
        ret = arena_bin_info[binind].redzone_size;
    } else
        ret = 0;

    return (ret);
}

JEMALLOC_INLINE size_t
p2rz(tsdn_t *tsdn, const void *ptr)
{
    size_t usize = isalloc(tsdn, ptr, false);

    return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path)
{

    assert(ptr != NULL);
    assert(!is_metadata || tcache == NULL);
    assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
    if (config_stats && is_metadata) {
        arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
            config_prof));
    }

    arena_dalloc(tsdn, ptr, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{

    idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{

    if (slow_path && config_fill && unlikely(opt_quarantine))
        quarantine(tsd, ptr);
    else
        idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path)
{

    arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
{

    if (slow_path && config_fill && unlikely(opt_quarantine))
        quarantine(tsd, ptr);
    else
        isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
    void *p;
    size_t usize, copysize;

    usize = sa2u(size + extra, alignment);
    if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
        return (NULL);
    p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
    if (p == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, without extra this time. */
        usize = sa2u(size, alignment);
        if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
            return (NULL);
        p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
            arena);
        if (p == NULL)
            return (NULL);
    }
    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;
    memcpy(p, ptr, copysize);
    isqalloc(tsd, ptr, oldsize, tcache, true);
    return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena)
{

    assert(ptr != NULL);
    assert(size != 0);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
            zero, tcache, arena));
    }

    return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
        tcache));
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero)
{

    return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
        tcache_get(tsd, true), NULL));
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{

    assert(ptr != NULL);
    assert(size != 0);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        /* Existing object alignment is inadequate. */
        return (true);
    }

    return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */