1 /******************************************************************************/
2 #ifdef JEMALLOC_H_TYPES
3 
4 #define	LARGE_MINCLASS		(ZU(1) << LG_LARGE_MINCLASS)
5 
6 /* Maximum number of regions in one run. */
7 #define	LG_RUN_MAXREGS		(LG_PAGE - LG_TINY_MIN)
8 #define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)
9 
10 /*
11  * Minimum redzone size.  Redzones may be larger than this if necessary to
12  * preserve region alignment.
13  */
14 #define	REDZONE_MINSIZE		16
15 
16 /*
17  * The minimum ratio of active:dirty pages per arena is computed as:
18  *
19  *   (nactive >> lg_dirty_mult) >= ndirty
20  *
21  * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
22  * many active pages as dirty pages.
23  */
24 #define	LG_DIRTY_MULT_DEFAULT	3
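/*
 * Illustrative sketch, not part of the original header: with the default
 * lg_dirty_mult of 3 and nactive == 1024, purging keeps ndirty at or below
 * 1024 >> 3 == 128 pages.  The hypothetical helper below only restates the
 * ratio check from the comment above.
 */
#if 0
static bool
example_dirty_exceeds_ratio(size_t nactive, size_t ndirty,
    ssize_t lg_dirty_mult)
{

	/* Purge when the invariant (nactive >> lg_dirty_mult) >= ndirty fails. */
	return ((nactive >> lg_dirty_mult) < ndirty);
}
#endif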
25 
26 typedef enum {
27 	purge_mode_ratio = 0,
28 	purge_mode_decay = 1,
29 
30 	purge_mode_limit = 2
31 } purge_mode_t;
32 /* ANDROID change */
33 /* Use the decay mode purge method.
34  * Setting the decay time to zero hurts performance because it triggers a
35  * purge at every free. Leave the default at zero, but zygote processes
36  * will set this to one using mallopt. This allows apps, which tend to be
37  * active, to benefit from the extra performance, while still allowing
38  * system servers to free PSS while they are sitting idle.
39  */
40 #define	PURGE_DEFAULT		purge_mode_decay
41 /* Default decay time in seconds. */
42 #define	DECAY_TIME_DEFAULT	0
43 /* End ANDROID change */
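/*
 * Illustrative sketch, not part of the original header: a zygote-style
 * process would opt into a one second decay time roughly as below.  This
 * assumes Bionic's M_DECAY_TIME mallopt() parameter; treat the exact call
 * as an assumption rather than something documented by this header.
 */
#if 0
#include <malloc.h>

static void
example_enable_decay_purging(void)
{

	/* Nonzero enables delayed (decay-based) purging; 0 purges eagerly. */
	mallopt(M_DECAY_TIME, 1);
}
#endif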
44 /* Number of event ticks between time checks. */
45 #define	DECAY_NTICKS_PER_UPDATE	1000
46 
47 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
48 typedef struct arena_avail_links_s arena_avail_links_t;
49 typedef struct arena_run_s arena_run_t;
50 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
51 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
52 typedef struct arena_chunk_s arena_chunk_t;
53 typedef struct arena_bin_info_s arena_bin_info_t;
54 typedef struct arena_decay_s arena_decay_t;
55 typedef struct arena_bin_s arena_bin_t;
56 typedef struct arena_s arena_t;
57 typedef struct arena_tdata_s arena_tdata_t;
58 
59 #endif /* JEMALLOC_H_TYPES */
60 /******************************************************************************/
61 #ifdef JEMALLOC_H_STRUCTS
62 
63 #ifdef JEMALLOC_ARENA_STRUCTS_A
64 struct arena_run_s {
65 	/* Index of bin this run is associated with. */
66 	szind_t		binind;
67 
68 	/* Number of free regions in run. */
69 	unsigned	nfree;
70 
71 	/* Per region allocated/deallocated bitmap. */
72 	bitmap_t	bitmap[BITMAP_GROUPS_MAX];
73 };
74 
75 /* Each element of the chunk map corresponds to one page within the chunk. */
76 struct arena_chunk_map_bits_s {
77 	/*
78 	 * Run address (or size) and various flags are stored together.  The bit
79 	 * layout looks like (assuming 32-bit system):
80 	 *
81 	 *   ???????? ???????? ???nnnnn nnndumla
82 	 *
83 	 * ? : Unallocated: Run address for first/last pages, unset for internal
84 	 *                  pages.
85 	 *     Small: Run page offset.
86 	 *     Large: Run page count for first page, unset for trailing pages.
87 	 * n : binind for small size class, BININD_INVALID for large size class.
88 	 * d : dirty?
89 	 * u : unzeroed?
90 	 * m : decommitted?
91 	 * l : large?
92 	 * a : allocated?
93 	 *
94 	 * Following are example bit patterns for the three types of runs.
95 	 *
96 	 * p : run page offset
97 	 * s : run size
98 	 * n : binind for size class; large objects set these to BININD_INVALID
99 	 * x : don't care
100 	 * - : 0
101 	 * + : 1
102 	 * [DUMLA] : bit set
103 	 * [dumla] : bit unset
104 	 *
105 	 *   Unallocated (clean):
106 	 *     ssssssss ssssssss sss+++++ +++dum-a
107 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
108 	 *     ssssssss ssssssss sss+++++ +++dUm-a
109 	 *
110 	 *   Unallocated (dirty):
111 	 *     ssssssss ssssssss sss+++++ +++D-m-a
112 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
113 	 *     ssssssss ssssssss sss+++++ +++D-m-a
114 	 *
115 	 *   Small:
116 	 *     pppppppp pppppppp pppnnnnn nnnd---A
117 	 *     pppppppp pppppppp pppnnnnn nnn----A
118 	 *     pppppppp pppppppp pppnnnnn nnnd---A
119 	 *
120 	 *   Large:
121 	 *     ssssssss ssssssss sss+++++ +++D--LA
122 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
123 	 *     -------- -------- ---+++++ +++D--LA
124 	 *
125 	 *   Large (sampled, size <= LARGE_MINCLASS):
126 	 *     ssssssss ssssssss sssnnnnn nnnD--LA
127 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
128 	 *     -------- -------- ---+++++ +++D--LA
129 	 *
130 	 *   Large (not sampled, size == LARGE_MINCLASS):
131 	 *     ssssssss ssssssss sss+++++ +++D--LA
132 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
133 	 *     -------- -------- ---+++++ +++D--LA
134 	 */
135 	size_t				bits;
136 #define	CHUNK_MAP_ALLOCATED	((size_t)0x01U)
137 #define	CHUNK_MAP_LARGE		((size_t)0x02U)
138 #define	CHUNK_MAP_STATE_MASK	((size_t)0x3U)
139 
140 #define	CHUNK_MAP_DECOMMITTED	((size_t)0x04U)
141 #define	CHUNK_MAP_UNZEROED	((size_t)0x08U)
142 #define	CHUNK_MAP_DIRTY		((size_t)0x10U)
143 #define	CHUNK_MAP_FLAGS_MASK	((size_t)0x1cU)
144 
145 #define	CHUNK_MAP_BININD_SHIFT	5
146 #define	BININD_INVALID		((size_t)0xffU)
147 #define	CHUNK_MAP_BININD_MASK	(BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
148 #define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
149 
150 #define	CHUNK_MAP_RUNIND_SHIFT	(CHUNK_MAP_BININD_SHIFT + 8)
151 #define	CHUNK_MAP_SIZE_SHIFT	(CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
152 #define	CHUNK_MAP_SIZE_MASK						\
153     (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
154 };
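/*
 * Illustrative sketch, not part of the original header: assembling and then
 * decoding a small-run map entry with the masks above.  The accessor
 * functions later in this header are the authoritative implementation; this
 * example only demonstrates the bit layout.
 */
#if 0
static void
example_small_mapbits(void)
{
	size_t runind = 5;
	size_t binind = 3;
	size_t flags = CHUNK_MAP_UNZEROED;
	size_t bits = (runind << CHUNK_MAP_RUNIND_SHIFT) |
	    (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED;

	assert(((bits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT) ==
	    binind);
	assert((bits >> CHUNK_MAP_RUNIND_SHIFT) == runind);
	assert((bits & CHUNK_MAP_LARGE) == 0);
}
#endif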
155 
156 struct arena_runs_dirty_link_s {
157 	qr(arena_runs_dirty_link_t)	rd_link;
158 };
159 
160 /*
161  * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
162  * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
163  * chunk header in order to improve cache locality.
164  */
165 struct arena_chunk_map_misc_s {
166 	/*
167 	 * Linkage for run heaps.  There are two disjoint uses:
168 	 *
169 	 * 1) arena_t's runs_avail heaps.
170 	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
171 	 *    runs, rather than directly embedding linkage.
172 	 */
173 	phn(arena_chunk_map_misc_t)		ph_link;
174 
175 	union {
176 		/* Linkage for list of dirty runs. */
177 		arena_runs_dirty_link_t		rd;
178 
179 		/* Profile counters, used for large object runs. */
180 		union {
181 			void			*prof_tctx_pun;
182 			prof_tctx_t		*prof_tctx;
183 		};
184 
185 		/* Small region run metadata. */
186 		arena_run_t			run;
187 	};
188 };
189 typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
190 #endif /* JEMALLOC_ARENA_STRUCTS_A */
191 
192 #ifdef JEMALLOC_ARENA_STRUCTS_B
193 /* Arena chunk header. */
194 struct arena_chunk_s {
195 	/*
196 	 * A pointer to the arena that owns the chunk is stored within the node.
197 	 * This field as a whole is used by chunks_rtree to support both
198 	 * ivsalloc() and core-based debugging.
199 	 */
200 	extent_node_t		node;
201 
202 	/*
203 	 * True if memory could be backed by transparent huge pages.  This is
204 	 * only directly relevant to Linux, since it is the only supported
205 	 * platform on which jemalloc interacts with explicit transparent huge
206 	 * page controls.
207 	 */
208 	bool			hugepage;
209 
210 	/*
211 	 * Map of pages within chunk that keeps track of free/large/small.  The
212 	 * first map_bias entries are omitted, since the chunk header does not
213 	 * need to be tracked in the map.  This omission saves a header page
214 	 * for common chunk sizes (e.g. 4 MiB).
215 	 */
216 	arena_chunk_map_bits_t	map_bits[1]; /* Dynamically sized. */
217 };
218 
219 /*
220  * Read-only information associated with each element of arena_t's bins array
221  * is stored separately, partly to reduce memory usage (only one copy, rather
222  * than one per arena), but mainly to avoid false cacheline sharing.
223  *
224  * Each run has the following layout:
225  *
226  *               /--------------------\
227  *               | pad?               |
228  *               |--------------------|
229  *               | redzone            |
230  *   reg0_offset | region 0           |
231  *               | redzone            |
232  *               |--------------------| \
233  *               | redzone            | |
234  *               | region 1           |  > reg_interval
235  *               | redzone            | /
236  *               |--------------------|
237  *               | ...                |
238  *               | ...                |
239  *               | ...                |
240  *               |--------------------|
241  *               | redzone            |
242  *               | region nregs-1     |
243  *               | redzone            |
244  *               |--------------------|
245  *               | alignment pad?     |
246  *               \--------------------/
247  *
248  * reg_interval has at least the same minimum alignment as reg_size; this
249  * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
250  * either 0 or redzone_size; it is present only if needed to align reg0_offset.
251  */
252 struct arena_bin_info_s {
253 	/* Size of regions in a run for this bin's size class. */
254 	size_t			reg_size;
255 
256 	/* Redzone size. */
257 	size_t			redzone_size;
258 
259 	/* Interval between regions (reg_size + (redzone_size << 1)). */
260 	size_t			reg_interval;
261 
262 	/* Total size of a run for this bin's size class. */
263 	size_t			run_size;
264 
265 	/* Total number of regions in a run for this bin's size class. */
266 	uint32_t		nregs;
267 
268 	/*
269 	 * Metadata used to manipulate bitmaps for runs associated with this
270 	 * bin.
271 	 */
272 	bitmap_info_t		bitmap_info;
273 
274 	/* Offset of first region in a run for this bin's size class. */
275 	uint32_t		reg0_offset;
276 };
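/*
 * Illustrative sketch, not part of the original header: given the run layout
 * described above, region regind of a run starts at
 *
 *   run_pages_base + reg0_offset + regind * reg_interval
 *
 * which is the inverse of the arena_run_regind() computation further below.
 * The helper name and the run_pages_base parameter are assumptions made for
 * this example only.
 */
#if 0
static void *
example_region_addr(void *run_pages_base, const arena_bin_info_t *bin_info,
    size_t regind)
{

	assert(regind < bin_info->nregs);
	return ((void *)((uintptr_t)run_pages_base +
	    (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind)));
}
#endif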
277 
278 struct arena_decay_s {
279 	/*
280 	 * Approximate time in seconds from the creation of a set of unused
281 	 * dirty pages until an equivalent set of unused dirty pages is purged
282 	 * and/or reused.
283 	 */
284 	ssize_t			time;
285 	/* time / SMOOTHSTEP_NSTEPS. */
286 	nstime_t		interval;
287 	/*
288 	 * Time at which the current decay interval logically started.  We do
289 	 * not actually advance to a new epoch until sometime after it starts
290 	 * because of scheduling and computation delays, and it is even possible
291 	 * to completely skip epochs.  In all cases, during epoch advancement we
292 	 * merge all relevant activity into the most recently recorded epoch.
293 	 */
294 	nstime_t		epoch;
295 	/* Deadline randomness generator. */
296 	uint64_t		jitter_state;
297 	/*
298 	 * Deadline for current epoch.  This is the sum of interval and per
299 	 * epoch jitter which is a uniform random variable in [0..interval).
300 	 * Epochs always advance by precise multiples of interval, but we
301 	 * randomize the deadline to reduce the likelihood of arenas purging in
302 	 * lockstep.
303 	 */
304 	nstime_t		deadline;
305 	/*
306 	 * Number of dirty pages at beginning of current epoch.  During epoch
307 	 * advancement we use the delta between arena->decay.ndirty and
308 	 * arena->ndirty to determine how many dirty pages, if any, were
309 	 * generated.
310 	 */
311 	size_t			ndirty;
312 	/*
313 	 * Trailing log of how many unused dirty pages were generated during
314 	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
315 	 * element is the most recent epoch.  Corresponding epoch times are
316 	 * relative to epoch.
317 	 */
318 	size_t			backlog[SMOOTHSTEP_NSTEPS];
319 };
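/*
 * Illustrative sketch, not part of the original header, using plain
 * nanosecond arithmetic instead of nstime_t: each epoch deadline is the
 * epoch start plus interval plus a uniform random jitter in [0..interval),
 * so arenas with identical settings still avoid purging in lockstep.
 */
#if 0
static uint64_t
example_decay_deadline_ns(uint64_t epoch_ns, uint64_t interval_ns,
    uint64_t jitter_ns)
{

	/* Caller draws jitter_ns uniformly from [0, interval_ns). */
	assert(interval_ns == 0 || jitter_ns < interval_ns);
	return (epoch_ns + interval_ns + jitter_ns);
}
#endif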
320 
321 struct arena_bin_s {
322 	/*
323 	 * All operations on runcur, runs, and stats require that lock be
324 	 * locked.  Run allocation/deallocation are protected by the arena lock,
325 	 * which may be acquired while holding one or more bin locks, but not
326 	 * vice versa.
327 	 */
328 	malloc_mutex_t		lock;
329 
330 	/*
331 	 * Current run being used to service allocations of this bin's size
332 	 * class.
333 	 */
334 	arena_run_t		*runcur;
335 
336 	/*
337 	 * Heap of non-full runs.  This heap is used when looking for an
338 	 * existing run when runcur is no longer usable.  We choose the
339 	 * non-full run that is lowest in memory; this policy tends to keep
340 	 * objects packed well, and it can also help reduce the number of
341 	 * almost-empty chunks.
342 	 */
343 	arena_run_heap_t	runs;
344 
345 	/* Bin statistics. */
346 	malloc_bin_stats_t	stats;
347 };
348 
349 struct arena_s {
350 	/* This arena's index within the arenas array. */
351 	unsigned		ind;
352 
353 	/*
354 	 * Number of threads currently assigned to this arena, synchronized via
355 	 * atomic operations.  Each thread has two distinct assignments, one for
356 	 * application-serving allocation, and the other for internal metadata
357 	 * allocation.  Internal metadata must not be allocated from arenas
358 	 * created via the arenas.extend mallctl, because the arena.<i>.reset
359 	 * mallctl indiscriminately discards all allocations for the affected
360 	 * arena.
361 	 *
362 	 *   0: Application allocation.
363 	 *   1: Internal metadata allocation.
364 	 */
365 	unsigned		nthreads[2];
366 
367 	/*
368 	 * There are three classes of arena operations from a locking
369 	 * perspective:
370 	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
371 	 * 2) Bin-related operations are protected by bin locks.
372 	 * 3) Chunk- and run-related operations are protected by this mutex.
373 	 */
374 	malloc_mutex_t		lock;
375 
376 	arena_stats_t		stats;
377 	/*
378 	 * List of tcaches for extant threads associated with this arena.
379 	 * Stats from these are merged incrementally, and at exit if
380 	 * opt_stats_print is enabled.
381 	 */
382 	ql_head(tcache_t)	tcache_ql;
383 
384 	uint64_t		prof_accumbytes;
385 
386 	/*
387 	 * PRNG state for cache index randomization of large allocation base
388 	 * pointers.
389 	 */
390 	size_t			offset_state;
391 
392 	dss_prec_t		dss_prec;
393 
394 	/* Extant arena chunks. */
395 	ql_head(extent_node_t)	achunks;
396 
397 	/* Extent serial number generator state. */
398 	size_t			extent_sn_next;
399 
400 	/*
401 	 * In order to avoid rapid chunk allocation/deallocation when an arena
402 	 * oscillates right on the cusp of needing a new chunk, cache the most
403 	 * recently freed chunk.  The spare is left in the arena's chunk trees
404 	 * until it is deleted.
405 	 *
406 	 * There is one spare chunk per arena, rather than one spare total, in
407 	 * order to avoid interactions between multiple threads that could make
408 	 * a single spare inadequate.
409 	 */
410 	arena_chunk_t		*spare;
411 
412 	/* Minimum ratio (log base 2) of nactive:ndirty. */
413 	ssize_t			lg_dirty_mult;
414 
415 	/* True if a thread is currently executing arena_purge_to_limit(). */
416 	bool			purging;
417 
418 	/* Number of pages in active runs and huge regions. */
419 	size_t			nactive;
420 
421 	/*
422 	 * Current count of pages within unused runs that are potentially
423 	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
424 	 * By tracking this, we can institute a limit on how much dirty unused
425 	 * memory is mapped for each arena.
426 	 */
427 	size_t			ndirty;
428 
429 	/*
430 	 * Unused dirty memory this arena manages.  Dirty memory is conceptually
431 	 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
432 	 * chunks, but the list linkage is actually semi-duplicated in order to
433 	 * avoid extra arena_chunk_map_misc_t space overhead.
434 	 *
435 	 *   LRU-----------------------------------------------------------MRU
436 	 *
437 	 *        /-- arena ---\
438 	 *        |            |
439 	 *        |            |
440 	 *        |------------|                             /- chunk -\
441 	 *   ...->|chunks_cache|<--------------------------->|  /----\ |<--...
442 	 *        |------------|                             |  |node| |
443 	 *        |            |                             |  |    | |
444 	 *        |            |    /- run -\    /- run -\   |  |    | |
445 	 *        |            |    |       |    |       |   |  |    | |
446 	 *        |            |    |       |    |       |   |  |    | |
447 	 *        |------------|    |-------|    |-------|   |  |----| |
448 	 *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd  |<----...
449 	 *        |------------|    |-------|    |-------|   |  |----| |
450 	 *        |            |    |       |    |       |   |  |    | |
451 	 *        |            |    |       |    |       |   |  \----/ |
452 	 *        |            |    \-------/    \-------/   |         |
453 	 *        |            |                             |         |
454 	 *        |            |                             |         |
455 	 *        \------------/                             \---------/
456 	 */
457 	arena_runs_dirty_link_t	runs_dirty;
458 	extent_node_t		chunks_cache;
459 
460 	/* Decay-based purging state. */
461 	arena_decay_t		decay;
462 
463 	/* Extant huge allocations. */
464 	ql_head(extent_node_t)	huge;
465 	/* Synchronizes all huge allocation/update/deallocation. */
466 	malloc_mutex_t		huge_mtx;
467 
468 	/*
469 	 * Trees of chunks that were previously allocated (trees differ only in
470 	 * node ordering).  These are used when allocating chunks, in an attempt
471 	 * to re-use address space.  Depending on function, different tree
472 	 * orderings are needed, which is why there are two trees with the same
473 	 * contents.
474 	 */
475 	extent_tree_t		chunks_szsnad_cached;
476 	extent_tree_t		chunks_ad_cached;
477 	extent_tree_t		chunks_szsnad_retained;
478 	extent_tree_t		chunks_ad_retained;
479 
480 	malloc_mutex_t		chunks_mtx;
481 	/* Cache of nodes that were allocated via base_alloc(). */
482 	ql_head(extent_node_t)	node_cache;
483 	malloc_mutex_t		node_cache_mtx;
484 
485 	/* User-configurable chunk hook functions. */
486 	chunk_hooks_t		chunk_hooks;
487 
488 	/* bins is used to store trees of free regions. */
489 	arena_bin_t		bins[NBINS];
490 
491 	/*
492 	 * Size-segregated address-ordered heaps of this arena's available runs,
493 	 * used for first-best-fit run allocation.  Runs are quantized, i.e.
494 	 * they reside in the last heap which corresponds to a size class less
495 	 * than or equal to the run size.
496 	 */
497 	arena_run_heap_t	runs_avail[NPSIZES];
498 };
499 
500 /* Used in conjunction with tsd for fast arena-related context lookup. */
501 struct arena_tdata_s {
502 	ticker_t		decay_ticker;
503 };
504 #endif /* JEMALLOC_ARENA_STRUCTS_B */
505 
506 #endif /* JEMALLOC_H_STRUCTS */
507 /******************************************************************************/
508 #ifdef JEMALLOC_H_EXTERNS
509 
510 static const size_t	large_pad =
511 #ifdef JEMALLOC_CACHE_OBLIVIOUS
512     PAGE
513 #else
514     0
515 #endif
516     ;
517 
518 extern purge_mode_t	opt_purge;
519 extern const char	*purge_mode_names[];
520 extern ssize_t		opt_lg_dirty_mult;
521 extern ssize_t		opt_decay_time;
522 
523 extern arena_bin_info_t	arena_bin_info[NBINS];
524 
525 extern size_t		map_bias; /* Number of arena chunk header pages. */
526 extern size_t		map_misc_offset;
527 extern size_t		arena_maxrun; /* Max run size for arenas. */
528 extern size_t		large_maxclass; /* Max large size class. */
529 extern unsigned		nlclasses; /* Number of large size classes. */
530 extern unsigned		nhclasses; /* Number of huge size classes. */
531 
532 #ifdef JEMALLOC_JET
533 typedef size_t (run_quantize_t)(size_t);
534 extern run_quantize_t *run_quantize_floor;
535 extern run_quantize_t *run_quantize_ceil;
536 #endif
537 void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
538     bool cache);
539 void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
540     bool cache);
541 extent_node_t	*arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
542 void	arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
543 void	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
544     size_t alignment, size_t *sn, bool *zero);
545 void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
546     size_t usize, size_t sn);
547 void	arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
548     void *chunk, size_t oldsize, size_t usize);
549 void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
550     void *chunk, size_t oldsize, size_t usize, size_t sn);
551 bool	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
552     void *chunk, size_t oldsize, size_t usize, bool *zero);
553 ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
554 bool	arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
555     ssize_t lg_dirty_mult);
556 ssize_t	arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
557 bool	arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
558 void	arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
559 void	arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
560 void	arena_reset(tsd_t *tsd, arena_t *arena);
561 void	arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
562     tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
563 void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
564     bool zero);
565 #ifdef JEMALLOC_JET
566 typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
567     uint8_t);
568 extern arena_redzone_corruption_t *arena_redzone_corruption;
569 typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
570 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
571 #else
572 void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
573 #endif
574 void	arena_quarantine_junk_small(void *ptr, size_t usize);
575 void	*arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
576     bool zero);
577 void	*arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
578     szind_t ind, bool zero);
579 void	*arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
580     size_t alignment, bool zero, tcache_t *tcache);
581 void	arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
582 void	arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
583     arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
584 void	arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
585     void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
586 void	arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
587     void *ptr, size_t pageind);
588 #ifdef JEMALLOC_JET
589 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
590 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
591 #else
592 void	arena_dalloc_junk_large(void *ptr, size_t usize);
593 #endif
594 void	arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
595     arena_chunk_t *chunk, void *ptr);
596 void	arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
597     void *ptr);
598 #ifdef JEMALLOC_JET
599 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
600 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
601 #endif
602 bool	arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
603     size_t size, size_t extra, bool zero);
604 void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
605     size_t size, size_t alignment, bool zero, tcache_t *tcache);
606 dss_prec_t	arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
607 bool	arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
608 ssize_t	arena_lg_dirty_mult_default_get(void);
609 bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
610 ssize_t	arena_decay_time_default_get(void);
611 bool	arena_decay_time_default_set(ssize_t decay_time);
612 void	arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
613     unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
614     ssize_t *decay_time, size_t *nactive, size_t *ndirty);
615 void	arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
616     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
617     size_t *nactive, size_t *ndirty, arena_stats_t *astats,
618     malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
619     malloc_huge_stats_t *hstats);
620 unsigned	arena_nthreads_get(arena_t *arena, bool internal);
621 void	arena_nthreads_inc(arena_t *arena, bool internal);
622 void	arena_nthreads_dec(arena_t *arena, bool internal);
623 size_t	arena_extent_sn_next(arena_t *arena);
624 arena_t	*arena_new(tsdn_t *tsdn, unsigned ind);
625 void	arena_boot(void);
626 void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
627 void	arena_prefork1(tsdn_t *tsdn, arena_t *arena);
628 void	arena_prefork2(tsdn_t *tsdn, arena_t *arena);
629 void	arena_prefork3(tsdn_t *tsdn, arena_t *arena);
630 void	arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
631 void	arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
632 
633 #endif /* JEMALLOC_H_EXTERNS */
634 /******************************************************************************/
635 #ifdef JEMALLOC_H_INLINES
636 
637 #ifndef JEMALLOC_ENABLE_INLINE
638 arena_chunk_map_bits_t	*arena_bitselm_get_mutable(arena_chunk_t *chunk,
639     size_t pageind);
640 const arena_chunk_map_bits_t	*arena_bitselm_get_const(
641     const arena_chunk_t *chunk, size_t pageind);
642 arena_chunk_map_misc_t	*arena_miscelm_get_mutable(arena_chunk_t *chunk,
643     size_t pageind);
644 const arena_chunk_map_misc_t	*arena_miscelm_get_const(
645     const arena_chunk_t *chunk, size_t pageind);
646 size_t	arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
647 void	*arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
648 arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
649 arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
650 size_t	*arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
651 const size_t	*arena_mapbitsp_get_const(const arena_chunk_t *chunk,
652     size_t pageind);
653 size_t	arena_mapbitsp_read(const size_t *mapbitsp);
654 size_t	arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
655 size_t	arena_mapbits_size_decode(size_t mapbits);
656 size_t	arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
657     size_t pageind);
658 size_t	arena_mapbits_large_size_get(const arena_chunk_t *chunk,
659     size_t pageind);
660 size_t	arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
661     size_t pageind);
662 szind_t	arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
663 size_t	arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
664 size_t	arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
665 size_t	arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
666     size_t pageind);
667 size_t	arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
668 size_t	arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
669 void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
670 size_t	arena_mapbits_size_encode(size_t size);
671 void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
672     size_t size, size_t flags);
673 void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
674     size_t size);
675 void	arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
676     size_t flags);
677 void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
678     size_t size, size_t flags);
679 void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
680     szind_t binind);
681 void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
682     size_t runind, szind_t binind, size_t flags);
683 void	arena_metadata_allocated_add(arena_t *arena, size_t size);
684 void	arena_metadata_allocated_sub(arena_t *arena, size_t size);
685 size_t	arena_metadata_allocated_get(arena_t *arena);
686 bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
687 bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
688 bool	arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
689 szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
690 szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
691 size_t	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
692     const void *ptr);
693 prof_tctx_t	*arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
694 void	arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
695     prof_tctx_t *tctx);
696 void	arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
697     const void *old_ptr, prof_tctx_t *old_tctx);
698 void	arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
699 void	arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
700 void	*arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
701     bool zero, tcache_t *tcache, bool slow_path);
702 arena_t	*arena_aalloc(const void *ptr);
703 size_t	arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
704 void	arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
705 void	arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
706     bool slow_path);
707 #endif
708 
709 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
710 #  ifdef JEMALLOC_ARENA_INLINE_A
711 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
712 arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
713 {
714 
715 	assert(pageind >= map_bias);
716 	assert(pageind < chunk_npages);
717 
718 	return (&chunk->map_bits[pageind-map_bias]);
719 }
720 
721 JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
722 arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
723 {
724 
725 	return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
726 }
727 
728 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
729 arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
730 {
731 
732 	assert(pageind >= map_bias);
733 	assert(pageind < chunk_npages);
734 
735 	return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
736 	    (uintptr_t)map_misc_offset) + pageind-map_bias);
737 }
738 
739 JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
740 arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
741 {
742 
743 	return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
744 }
745 
746 JEMALLOC_ALWAYS_INLINE size_t
747 arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
748 {
749 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
750 	size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
751 	    map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
752 
753 	assert(pageind >= map_bias);
754 	assert(pageind < chunk_npages);
755 
756 	return (pageind);
757 }
758 
759 JEMALLOC_ALWAYS_INLINE void *
760 arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
761 {
762 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
763 	size_t pageind = arena_miscelm_to_pageind(miscelm);
764 
765 	return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
766 }
767 
768 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
769 arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
770 {
771 	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
772 	    *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
773 
774 	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
775 	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
776 
777 	return (miscelm);
778 }
779 
780 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
781 arena_run_to_miscelm(arena_run_t *run)
782 {
783 	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
784 	    *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
785 
786 	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
787 	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
788 
789 	return (miscelm);
790 }
791 
792 JEMALLOC_ALWAYS_INLINE size_t *
793 arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
794 {
795 
796 	return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
797 }
798 
799 JEMALLOC_ALWAYS_INLINE const size_t *
800 arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
801 {
802 
803 	return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
804 }
805 
806 JEMALLOC_ALWAYS_INLINE size_t
807 arena_mapbitsp_read(const size_t *mapbitsp)
808 {
809 
810 	return (*mapbitsp);
811 }
812 
813 JEMALLOC_ALWAYS_INLINE size_t
814 arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
815 {
816 
817 	return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
818 }
819 
820 JEMALLOC_ALWAYS_INLINE size_t
821 arena_mapbits_size_decode(size_t mapbits)
822 {
823 	size_t size;
824 
825 #if CHUNK_MAP_SIZE_SHIFT > 0
826 	size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
827 #elif CHUNK_MAP_SIZE_SHIFT == 0
828 	size = mapbits & CHUNK_MAP_SIZE_MASK;
829 #else
830 	size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
831 #endif
832 
833 	return (size);
834 }
835 
836 JEMALLOC_ALWAYS_INLINE size_t
837 arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
838 {
839 	size_t mapbits;
840 
841 	mapbits = arena_mapbits_get(chunk, pageind);
842 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
843 	return (arena_mapbits_size_decode(mapbits));
844 }
845 
846 JEMALLOC_ALWAYS_INLINE size_t
847 arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
848 {
849 	size_t mapbits;
850 
851 	mapbits = arena_mapbits_get(chunk, pageind);
852 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
853 	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
854 	return (arena_mapbits_size_decode(mapbits));
855 }
856 
857 JEMALLOC_ALWAYS_INLINE size_t
858 arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
859 {
860 	size_t mapbits;
861 
862 	mapbits = arena_mapbits_get(chunk, pageind);
863 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
864 	    CHUNK_MAP_ALLOCATED);
865 	return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
866 }
867 
868 JEMALLOC_ALWAYS_INLINE szind_t
869 arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
870 {
871 	size_t mapbits;
872 	szind_t binind;
873 
874 	mapbits = arena_mapbits_get(chunk, pageind);
875 	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
876 	assert(binind < NBINS || binind == BININD_INVALID);
877 	return (binind);
878 }
879 
880 JEMALLOC_ALWAYS_INLINE size_t
881 arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
882 {
883 	size_t mapbits;
884 
885 	mapbits = arena_mapbits_get(chunk, pageind);
886 	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
887 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
888 	return (mapbits & CHUNK_MAP_DIRTY);
889 }
890 
891 JEMALLOC_ALWAYS_INLINE size_t
892 arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
893 {
894 	size_t mapbits;
895 
896 	mapbits = arena_mapbits_get(chunk, pageind);
897 	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
898 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
899 	return (mapbits & CHUNK_MAP_UNZEROED);
900 }
901 
902 JEMALLOC_ALWAYS_INLINE size_t
903 arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
904 {
905 	size_t mapbits;
906 
907 	mapbits = arena_mapbits_get(chunk, pageind);
908 	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
909 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
910 	return (mapbits & CHUNK_MAP_DECOMMITTED);
911 }
912 
913 JEMALLOC_ALWAYS_INLINE size_t
914 arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
915 {
916 	size_t mapbits;
917 
918 	mapbits = arena_mapbits_get(chunk, pageind);
919 	return (mapbits & CHUNK_MAP_LARGE);
920 }
921 
922 JEMALLOC_ALWAYS_INLINE size_t
923 arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
924 {
925 	size_t mapbits;
926 
927 	mapbits = arena_mapbits_get(chunk, pageind);
928 	return (mapbits & CHUNK_MAP_ALLOCATED);
929 }
930 
931 JEMALLOC_ALWAYS_INLINE void
932 arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
933 {
934 
935 	*mapbitsp = mapbits;
936 }
937 
938 JEMALLOC_ALWAYS_INLINE size_t
939 arena_mapbits_size_encode(size_t size)
940 {
941 	size_t mapbits;
942 
943 #if CHUNK_MAP_SIZE_SHIFT > 0
944 	mapbits = size << CHUNK_MAP_SIZE_SHIFT;
945 #elif CHUNK_MAP_SIZE_SHIFT == 0
946 	mapbits = size;
947 #else
948 	mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
949 #endif
950 
951 	assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
952 	return (mapbits);
953 }
954 
955 JEMALLOC_ALWAYS_INLINE void
956 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
957     size_t flags)
958 {
959 	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
960 
961 	assert((size & PAGE_MASK) == 0);
962 	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
963 	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
964 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
965 	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
966 	    CHUNK_MAP_BININD_INVALID | flags);
967 }
968 
969 JEMALLOC_ALWAYS_INLINE void
970 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
971     size_t size)
972 {
973 	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
974 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
975 
976 	assert((size & PAGE_MASK) == 0);
977 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
978 	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
979 	    (mapbits & ~CHUNK_MAP_SIZE_MASK));
980 }
981 
982 JEMALLOC_ALWAYS_INLINE void
983 arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
984 {
985 	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
986 
987 	assert((flags & CHUNK_MAP_UNZEROED) == flags);
988 	arena_mapbitsp_write(mapbitsp, flags);
989 }
990 
991 JEMALLOC_ALWAYS_INLINE void
992 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
993     size_t flags)
994 {
995 	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
996 
997 	assert((size & PAGE_MASK) == 0);
998 	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
999 	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
1000 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
1001 	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
1002 	    CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
1003 	    CHUNK_MAP_ALLOCATED);
1004 }
1005 
1006 JEMALLOC_ALWAYS_INLINE void
1007 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
1008     szind_t binind)
1009 {
1010 	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
1011 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
1012 
1013 	assert(binind <= BININD_INVALID);
1014 	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
1015 	    large_pad);
1016 	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
1017 	    (binind << CHUNK_MAP_BININD_SHIFT));
1018 }
1019 
1020 JEMALLOC_ALWAYS_INLINE void
1021 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
1022     szind_t binind, size_t flags)
1023 {
1024 	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
1025 
1026 	assert(binind < BININD_INVALID);
1027 	assert(pageind - runind >= map_bias);
1028 	assert((flags & CHUNK_MAP_UNZEROED) == flags);
1029 	arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
1030 	    (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
1031 }
1032 
1033 JEMALLOC_INLINE void
1034 arena_metadata_allocated_add(arena_t *arena, size_t size)
1035 {
1036 
1037 	atomic_add_z(&arena->stats.metadata_allocated, size);
1038 }
1039 
1040 JEMALLOC_INLINE void
1041 arena_metadata_allocated_sub(arena_t *arena, size_t size)
1042 {
1043 
1044 	atomic_sub_z(&arena->stats.metadata_allocated, size);
1045 }
1046 
1047 JEMALLOC_INLINE size_t
1048 arena_metadata_allocated_get(arena_t *arena)
1049 {
1050 
1051 	return (atomic_read_z(&arena->stats.metadata_allocated));
1052 }
1053 
1054 JEMALLOC_INLINE bool
1055 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
1056 {
1057 
1058 	cassert(config_prof);
1059 	assert(prof_interval != 0);
1060 
1061 	arena->prof_accumbytes += accumbytes;
1062 	if (arena->prof_accumbytes >= prof_interval) {
1063 		arena->prof_accumbytes -= prof_interval;
1064 		return (true);
1065 	}
1066 	return (false);
1067 }
1068 
1069 JEMALLOC_INLINE bool
1070 arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
1071 {
1072 
1073 	cassert(config_prof);
1074 
1075 	if (likely(prof_interval == 0))
1076 		return (false);
1077 	return (arena_prof_accum_impl(arena, accumbytes));
1078 }
1079 
1080 JEMALLOC_INLINE bool
1081 arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
1082 {
1083 
1084 	cassert(config_prof);
1085 
1086 	if (likely(prof_interval == 0))
1087 		return (false);
1088 
1089 	{
1090 		bool ret;
1091 
1092 		malloc_mutex_lock(tsdn, &arena->lock);
1093 		ret = arena_prof_accum_impl(arena, accumbytes);
1094 		malloc_mutex_unlock(tsdn, &arena->lock);
1095 		return (ret);
1096 	}
1097 }
1098 
1099 JEMALLOC_ALWAYS_INLINE szind_t
1100 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
1101 {
1102 	szind_t binind;
1103 
1104 	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
1105 
1106 	if (config_debug) {
1107 		arena_chunk_t *chunk;
1108 		arena_t *arena;
1109 		size_t pageind;
1110 		size_t actual_mapbits;
1111 		size_t rpages_ind;
1112 		const arena_run_t *run;
1113 		arena_bin_t *bin;
1114 		szind_t run_binind, actual_binind;
1115 		arena_bin_info_t *bin_info;
1116 		const arena_chunk_map_misc_t *miscelm;
1117 		const void *rpages;
1118 
1119 		assert(binind != BININD_INVALID);
1120 		assert(binind < NBINS);
1121 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1122 		arena = extent_node_arena_get(&chunk->node);
1123 		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1124 		actual_mapbits = arena_mapbits_get(chunk, pageind);
1125 		assert(mapbits == actual_mapbits);
1126 		assert(arena_mapbits_large_get(chunk, pageind) == 0);
1127 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1128 		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
1129 		    pageind);
1130 		miscelm = arena_miscelm_get_const(chunk, rpages_ind);
1131 		run = &miscelm->run;
1132 		run_binind = run->binind;
1133 		bin = &arena->bins[run_binind];
1134 		actual_binind = (szind_t)(bin - arena->bins);
1135 		assert(run_binind == actual_binind);
1136 		bin_info = &arena_bin_info[actual_binind];
1137 		rpages = arena_miscelm_to_rpages(miscelm);
1138 		assert(((uintptr_t)ptr - ((uintptr_t)rpages +
1139 		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
1140 		    == 0);
1141 	}
1142 
1143 	return (binind);
1144 }
1145 #  endif /* JEMALLOC_ARENA_INLINE_A */
1146 
1147 #  ifdef JEMALLOC_ARENA_INLINE_B
1148 JEMALLOC_INLINE szind_t
1149 arena_bin_index(arena_t *arena, arena_bin_t *bin)
1150 {
1151 	szind_t binind = (szind_t)(bin - arena->bins);
1152 	assert(binind < NBINS);
1153 	return (binind);
1154 }
1155 
1156 JEMALLOC_INLINE size_t
1157 arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
1158 {
1159 	size_t diff, interval, shift, regind;
1160 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1161 	void *rpages = arena_miscelm_to_rpages(miscelm);
1162 
1163 	/*
1164 	 * Freeing a pointer lower than region zero can cause assertion
1165 	 * failure.
1166 	 */
1167 	assert((uintptr_t)ptr >= (uintptr_t)rpages +
1168 	    (uintptr_t)bin_info->reg0_offset);
1169 
1170 	/*
1171 	 * Avoid doing division with a variable divisor if possible.  Using
1172 	 * actual division here can reduce allocator throughput by over 20%!
1173 	 */
1174 	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
1175 	    bin_info->reg0_offset);
1176 
1177 	/* Rescale (factor powers of 2 out of the numerator and denominator). */
1178 	interval = bin_info->reg_interval;
1179 	shift = ffs_zu(interval) - 1;
1180 	diff >>= shift;
1181 	interval >>= shift;
1182 
1183 	if (interval == 1) {
1184 		/* The divisor was a power of 2. */
1185 		regind = diff;
1186 	} else {
1187 		/*
1188 		 * To divide by a number D that is not a power of two we
1189 		 * multiply by (2^21 / D) and then right shift by 21 positions.
1190 		 *
1191 		 *   X / D
1192 		 *
1193 		 * becomes
1194 		 *
1195 		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
1196 		 *
1197 		 * We can omit the first three elements, because we never
1198 		 * divide by 0, and 1 and 2 are both powers of two, which are
1199 		 * handled above.
1200 		 */
1201 #define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
1202 #define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
1203 		static const size_t interval_invs[] = {
1204 		    SIZE_INV(3),
1205 		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
1206 		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
1207 		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
1208 		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
1209 		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
1210 		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
1211 		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
1212 		};
1213 
1214 		if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
1215 		    + 2))) {
1216 			regind = (diff * interval_invs[interval - 3]) >>
1217 			    SIZE_INV_SHIFT;
1218 		} else
1219 			regind = diff / interval;
1220 #undef SIZE_INV
1221 #undef SIZE_INV_SHIFT
1222 	}
1223 	assert(diff == regind * interval);
1224 	assert(regind < bin_info->nregs);
1225 
1226 	return (regind);
1227 }
1228 
1229 JEMALLOC_INLINE prof_tctx_t *
1230 arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
1231 {
1232 	prof_tctx_t *ret;
1233 	arena_chunk_t *chunk;
1234 
1235 	cassert(config_prof);
1236 	assert(ptr != NULL);
1237 
1238 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1239 	if (likely(chunk != ptr)) {
1240 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1241 		size_t mapbits = arena_mapbits_get(chunk, pageind);
1242 		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
1243 		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
1244 			ret = (prof_tctx_t *)(uintptr_t)1U;
1245 		else {
1246 			arena_chunk_map_misc_t *elm =
1247 			    arena_miscelm_get_mutable(chunk, pageind);
1248 			ret = atomic_read_p(&elm->prof_tctx_pun);
1249 		}
1250 	} else
1251 		ret = huge_prof_tctx_get(tsdn, ptr);
1252 
1253 	return (ret);
1254 }
1255 
1256 JEMALLOC_INLINE void
1257 arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
1258     prof_tctx_t *tctx)
1259 {
1260 	arena_chunk_t *chunk;
1261 
1262 	cassert(config_prof);
1263 	assert(ptr != NULL);
1264 
1265 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1266 	if (likely(chunk != ptr)) {
1267 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1268 
1269 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1270 
1271 		if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
1272 		    (uintptr_t)1U)) {
1273 			arena_chunk_map_misc_t *elm;
1274 
1275 			assert(arena_mapbits_large_get(chunk, pageind) != 0);
1276 
1277 			elm = arena_miscelm_get_mutable(chunk, pageind);
1278 			atomic_write_p(&elm->prof_tctx_pun, tctx);
1279 		} else {
1280 			/*
1281 			 * tctx must always be initialized for large runs.
1282 			 * Assert that the surrounding conditional logic is
1283 			 * equivalent to checking whether ptr refers to a large
1284 			 * run.
1285 			 */
1286 			assert(arena_mapbits_large_get(chunk, pageind) == 0);
1287 		}
1288 	} else
1289 		huge_prof_tctx_set(tsdn, ptr, tctx);
1290 }
1291 
1292 JEMALLOC_INLINE void
1293 arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
1294     const void *old_ptr, prof_tctx_t *old_tctx)
1295 {
1296 
1297 	cassert(config_prof);
1298 	assert(ptr != NULL);
1299 
1300 	if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
1301 	    (uintptr_t)old_tctx > (uintptr_t)1U))) {
1302 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1303 		if (likely(chunk != ptr)) {
1304 			size_t pageind;
1305 			arena_chunk_map_misc_t *elm;
1306 
1307 			pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1308 			    LG_PAGE;
1309 			assert(arena_mapbits_allocated_get(chunk, pageind) !=
1310 			    0);
1311 			assert(arena_mapbits_large_get(chunk, pageind) != 0);
1312 
1313 			elm = arena_miscelm_get_mutable(chunk, pageind);
1314 			atomic_write_p(&elm->prof_tctx_pun,
1315 			    (prof_tctx_t *)(uintptr_t)1U);
1316 		} else
1317 			huge_prof_tctx_reset(tsdn, ptr);
1318 	}
1319 }
1320 
1321 JEMALLOC_ALWAYS_INLINE void
1322 arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
1323 {
1324 	tsd_t *tsd;
1325 	ticker_t *decay_ticker;
1326 
1327 	if (unlikely(tsdn_null(tsdn)))
1328 		return;
1329 	tsd = tsdn_tsd(tsdn);
1330 	decay_ticker = decay_ticker_get(tsd, arena->ind);
1331 	if (unlikely(decay_ticker == NULL))
1332 		return;
1333 	if (unlikely(ticker_ticks(decay_ticker, nticks)))
1334 		arena_purge(tsdn, arena, false);
1335 }
1336 
1337 JEMALLOC_ALWAYS_INLINE void
1338 arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
1339 {
1340 
1341 	arena_decay_ticks(tsdn, arena, 1);
1342 }
1343 
1344 JEMALLOC_ALWAYS_INLINE void *
1345 arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
1346     tcache_t *tcache, bool slow_path)
1347 {
1348 
1349 	assert(!tsdn_null(tsdn) || tcache == NULL);
1350 	assert(size != 0);
1351 
1352 	if (likely(tcache != NULL)) {
1353 		if (likely(size <= SMALL_MAXCLASS)) {
1354 			return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
1355 			    tcache, size, ind, zero, slow_path));
1356 		}
1357 		if (likely(size <= tcache_maxclass)) {
1358 			return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
1359 			    tcache, size, ind, zero, slow_path));
1360 		}
1361 		/* (size > tcache_maxclass) case falls through. */
1362 		assert(size > tcache_maxclass);
1363 	}
1364 
1365 	return (arena_malloc_hard(tsdn, arena, size, ind, zero));
1366 }
1367 
1368 JEMALLOC_ALWAYS_INLINE arena_t *
1369 arena_aalloc(const void *ptr)
1370 {
1371 	arena_chunk_t *chunk;
1372 
1373 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1374 	if (likely(chunk != ptr))
1375 		return (extent_node_arena_get(&chunk->node));
1376 	else
1377 		return (huge_aalloc(ptr));
1378 }
1379 
1380 /* Return the size of the allocation pointed to by ptr. */
1381 JEMALLOC_ALWAYS_INLINE size_t
1382 arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
1383 {
1384 	size_t ret;
1385 	arena_chunk_t *chunk;
1386 	size_t pageind;
1387 	szind_t binind;
1388 
1389 	assert(ptr != NULL);
1390 
1391 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1392 	if (likely(chunk != ptr)) {
1393 		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1394 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1395 		binind = arena_mapbits_binind_get(chunk, pageind);
1396 		if (unlikely(binind == BININD_INVALID || (config_prof && !demote
1397 		    && arena_mapbits_large_get(chunk, pageind) != 0))) {
1398 			/*
1399 			 * Large allocation.  In the common case (demote), and
1400 			 * as this is an inline function, most callers will only
1401 			 * end up looking at binind to determine that ptr is a
1402 			 * small allocation.
1403 			 */
1404 			assert(config_cache_oblivious || ((uintptr_t)ptr &
1405 			    PAGE_MASK) == 0);
1406 			ret = arena_mapbits_large_size_get(chunk, pageind) -
1407 			    large_pad;
1408 			assert(ret != 0);
1409 			assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
1410 			    chunk_npages);
1411 			assert(arena_mapbits_dirty_get(chunk, pageind) ==
1412 			    arena_mapbits_dirty_get(chunk,
1413 			    pageind+((ret+large_pad)>>LG_PAGE)-1));
1414 		} else {
1415 			/*
1416 			 * Small allocation (possibly promoted to a large
1417 			 * object).
1418 			 */
1419 			assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
1420 			    arena_ptr_small_binind_get(ptr,
1421 			    arena_mapbits_get(chunk, pageind)) == binind);
1422 			ret = index2size(binind);
1423 		}
1424 	} else
1425 		ret = huge_salloc(tsdn, ptr);
1426 
1427 	return (ret);
1428 }
1429 
1430 JEMALLOC_ALWAYS_INLINE void
1431 arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
1432 {
1433 	arena_chunk_t *chunk;
1434 	size_t pageind, mapbits;
1435 
1436 	assert(!tsdn_null(tsdn) || tcache == NULL);
1437 	assert(ptr != NULL);
1438 
1439 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1440 	if (likely(chunk != ptr)) {
1441 		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1442 #if defined(__ANDROID__)
1443 		/* Verify the ptr is actually in the chunk. */
1444 		if (unlikely(pageind < map_bias || pageind >= chunk_npages)) {
1445 		    async_safe_fatal("Invalid address %p passed to free: invalid page index", ptr);
1446 		}
1447 #endif
1448 		mapbits = arena_mapbits_get(chunk, pageind);
1449 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1450 #if defined(__ANDROID__)
1451 		/* Verify the ptr has been allocated. */
1452 		if (unlikely((mapbits & CHUNK_MAP_ALLOCATED) == 0)) {
1453 		    async_safe_fatal("Invalid address %p passed to free: value not allocated", ptr);
1454 		}
1455 #endif
1456 		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
1457 			/* Small allocation. */
1458 			if (likely(tcache != NULL)) {
1459 				szind_t binind = arena_ptr_small_binind_get(ptr,
1460 				    mapbits);
1461 				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
1462 				    binind, slow_path);
1463 			} else {
1464 				arena_dalloc_small(tsdn,
1465 				    extent_node_arena_get(&chunk->node), chunk,
1466 				    ptr, pageind);
1467 			}
1468 		} else {
1469 			size_t size = arena_mapbits_large_size_get(chunk,
1470 			    pageind);
1471 
1472 			assert(config_cache_oblivious || ((uintptr_t)ptr &
1473 			    PAGE_MASK) == 0);
1474 
1475 			if (likely(tcache != NULL) && size - large_pad <=
1476 			    tcache_maxclass) {
1477 				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1478 				    size - large_pad, slow_path);
1479 			} else {
1480 				arena_dalloc_large(tsdn,
1481 				    extent_node_arena_get(&chunk->node), chunk,
1482 				    ptr);
1483 			}
1484 		}
1485 	} else
1486 		huge_dalloc(tsdn, ptr);
1487 }
1488 
1489 JEMALLOC_ALWAYS_INLINE void
1490 arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
1491     bool slow_path)
1492 {
1493 	arena_chunk_t *chunk;
1494 
1495 	assert(!tsdn_null(tsdn) || tcache == NULL);
1496 
1497 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1498 	if (likely(chunk != ptr)) {
1499 		if (config_prof && opt_prof) {
1500 			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1501 			    LG_PAGE;
1502 			assert(arena_mapbits_allocated_get(chunk, pageind) !=
1503 			    0);
1504 			if (arena_mapbits_large_get(chunk, pageind) != 0) {
1505 				/*
1506 				 * Make sure to use promoted size, not request
1507 				 * size.
1508 				 */
1509 				size = arena_mapbits_large_size_get(chunk,
1510 				    pageind) - large_pad;
1511 			}
1512 		}
1513 		assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
1514 
1515 		if (likely(size <= SMALL_MAXCLASS)) {
1516 			/* Small allocation. */
1517 			if (likely(tcache != NULL)) {
1518 				szind_t binind = size2index(size);
1519 				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
1520 				    binind, slow_path);
1521 			} else {
1522 				size_t pageind = ((uintptr_t)ptr -
1523 				    (uintptr_t)chunk) >> LG_PAGE;
1524 				arena_dalloc_small(tsdn,
1525 				    extent_node_arena_get(&chunk->node), chunk,
1526 				    ptr, pageind);
1527 			}
1528 		} else {
1529 			assert(config_cache_oblivious || ((uintptr_t)ptr &
1530 			    PAGE_MASK) == 0);
1531 
1532 			if (likely(tcache != NULL) && size <= tcache_maxclass) {
1533 				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1534 				    size, slow_path);
1535 			} else {
1536 				arena_dalloc_large(tsdn,
1537 				    extent_node_arena_get(&chunk->node), chunk,
1538 				    ptr);
1539 			}
1540 		}
1541 	} else
1542 		huge_dalloc(tsdn, ptr);
1543 }
1544 #  endif /* JEMALLOC_ARENA_INLINE_B */
1545 #endif
1546 
1547 #endif /* JEMALLOC_H_INLINES */
1548 /******************************************************************************/
1549