1 /******************************************************************************/
2 #ifdef JEMALLOC_H_TYPES
3 
4 #define	LARGE_MINCLASS		(ZU(1) << LG_LARGE_MINCLASS)
5 
6 /* Maximum number of regions in one run. */
7 #define	LG_RUN_MAXREGS		(LG_PAGE - LG_TINY_MIN)
8 #define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)
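/*
 * For example, assuming LG_PAGE == 12 (4 KiB pages) and LG_TINY_MIN == 3
 * (8-byte minimum size class), LG_RUN_MAXREGS == 9 and RUN_MAXREGS == 512,
 * i.e. no run ever holds more than 512 regions.
 */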
9 
10 /*
11  * Minimum redzone size.  Redzones may be larger than this if necessary to
12  * preserve region alignment.
13  */
14 #define	REDZONE_MINSIZE		16
15 
16 /*
17  * The minimum ratio of active:dirty pages per arena is computed as:
18  *
19  *   (nactive >> lg_dirty_mult) >= ndirty
20  *
21  * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
22  * many active pages as dirty pages.
23  */
24 #define	LG_DIRTY_MULT_DEFAULT	3
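/*
 * For example, with the default lg_dirty_mult of 3, an arena with 8192 active
 * pages may retain at most 8192 >> 3 == 1024 dirty pages before ratio-based
 * purging kicks in.
 */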
25 
26 typedef enum {
27 	purge_mode_ratio = 0,
28 	purge_mode_decay = 1,
29 
30 	purge_mode_limit = 2
31 } purge_mode_t;
32 /* ANDROID change */
33 /* Use the decay purge mode.  Setting DECAY_TIME_DEFAULT to zero forces all
34  * pages to be purged as soon as they are no longer in use.
35  */
36 #define	PURGE_DEFAULT		purge_mode_decay
37 /* Default decay time in seconds. */
38 #define	DECAY_TIME_DEFAULT	0
39 /* End ANDROID change */
40 /* Number of event ticks between time checks. */
41 #define	DECAY_NTICKS_PER_UPDATE	1000
42 
43 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
44 typedef struct arena_run_s arena_run_t;
45 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
46 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
47 typedef struct arena_chunk_s arena_chunk_t;
48 typedef struct arena_bin_info_s arena_bin_info_t;
49 typedef struct arena_bin_s arena_bin_t;
50 typedef struct arena_s arena_t;
51 typedef struct arena_tdata_s arena_tdata_t;
52 
53 #endif /* JEMALLOC_H_TYPES */
54 /******************************************************************************/
55 #ifdef JEMALLOC_H_STRUCTS
56 
57 #ifdef JEMALLOC_ARENA_STRUCTS_A
58 struct arena_run_s {
59 	/* Index of bin this run is associated with. */
60 	szind_t		binind;
61 
62 	/* Number of free regions in run. */
63 	unsigned	nfree;
64 
65 	/* Per region allocated/deallocated bitmap. */
66 	bitmap_t	bitmap[BITMAP_GROUPS_MAX];
67 };
68 
69 /* Each element of the chunk map corresponds to one page within the chunk. */
70 struct arena_chunk_map_bits_s {
71 	/*
72 	 * Run address (or size) and various flags are stored together.  The bit
73 	 * layout looks like (assuming 32-bit system):
74 	 *
75 	 *   ???????? ???????? ???nnnnn nnndumla
76 	 *
77 	 * ? : Unallocated: Run address for first/last pages, unset for internal
78 	 *                  pages.
79 	 *     Small: Run page offset.
80 	 *     Large: Run page count for first page, unset for trailing pages.
81 	 * n : binind for small size class, BININD_INVALID for large size class.
82 	 * d : dirty?
83 	 * u : unzeroed?
84 	 * m : decommitted?
85 	 * l : large?
86 	 * a : allocated?
87 	 *
88 	 * Following are example bit patterns for the three types of runs.
89 	 *
90 	 * p : run page offset
91 	 * s : run size
92 	 * n : binind for size class; large objects set these to BININD_INVALID
93 	 * x : don't care
94 	 * - : 0
95 	 * + : 1
96 	 * [DUMLA] : bit set
97 	 * [dumla] : bit unset
98 	 *
99 	 *   Unallocated (clean):
100 	 *     ssssssss ssssssss sss+++++ +++dum-a
101 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
102 	 *     ssssssss ssssssss sss+++++ +++dUm-a
103 	 *
104 	 *   Unallocated (dirty):
105 	 *     ssssssss ssssssss sss+++++ +++D-m-a
106 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
107 	 *     ssssssss ssssssss sss+++++ +++D-m-a
108 	 *
109 	 *   Small:
110 	 *     pppppppp pppppppp pppnnnnn nnnd---A
111 	 *     pppppppp pppppppp pppnnnnn nnn----A
112 	 *     pppppppp pppppppp pppnnnnn nnnd---A
113 	 *
114 	 *   Large:
115 	 *     ssssssss ssssssss sss+++++ +++D--LA
116 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
117 	 *     -------- -------- ---+++++ +++D--LA
118 	 *
119 	 *   Large (sampled, size <= LARGE_MINCLASS):
120 	 *     ssssssss ssssssss sssnnnnn nnnD--LA
121 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
122 	 *     -------- -------- ---+++++ +++D--LA
123 	 *
124 	 *   Large (not sampled, size == LARGE_MINCLASS):
125 	 *     ssssssss ssssssss sss+++++ +++D--LA
126 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
127 	 *     -------- -------- ---+++++ +++D--LA
128 	 */
129 	size_t				bits;
130 #define	CHUNK_MAP_ALLOCATED	((size_t)0x01U)
131 #define	CHUNK_MAP_LARGE		((size_t)0x02U)
132 #define	CHUNK_MAP_STATE_MASK	((size_t)0x3U)
133 
134 #define	CHUNK_MAP_DECOMMITTED	((size_t)0x04U)
135 #define	CHUNK_MAP_UNZEROED	((size_t)0x08U)
136 #define	CHUNK_MAP_DIRTY		((size_t)0x10U)
137 #define	CHUNK_MAP_FLAGS_MASK	((size_t)0x1cU)
138 
139 #define	CHUNK_MAP_BININD_SHIFT	5
140 #define	BININD_INVALID		((size_t)0xffU)
141 #define	CHUNK_MAP_BININD_MASK	(BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
142 #define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
143 
144 #define	CHUNK_MAP_RUNIND_SHIFT	(CHUNK_MAP_BININD_SHIFT + 8)
145 #define	CHUNK_MAP_SIZE_SHIFT	(CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
146 #define	CHUNK_MAP_SIZE_MASK						\
147     (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
148 };
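/*
 * Illustrative decoding, assuming LG_PAGE == 12 (so CHUNK_MAP_RUNIND_SHIFT ==
 * 13 and CHUNK_MAP_SIZE_SHIFT == 1): for an allocated small page,
 *
 *   run page offset  == bits >> CHUNK_MAP_RUNIND_SHIFT
 *   size class index == (bits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT
 *
 * The arena_mapbits_*_get() accessors below perform exactly these extractions.
 */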
149 
150 struct arena_runs_dirty_link_s {
151 	qr(arena_runs_dirty_link_t)	rd_link;
152 };
153 
154 /*
155  * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
156  * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
157  * chunk header in order to improve cache locality.
158  */
159 struct arena_chunk_map_misc_s {
160 	/*
161 	 * Linkage for run trees.  There are two disjoint uses:
162 	 *
163 	 * 1) arena_t's runs_avail tree.
164 	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
165 	 *    runs, rather than directly embedding linkage.
166 	 */
167 	rb_node(arena_chunk_map_misc_t)		rb_link;
168 
169 	union {
170 		/* Linkage for list of dirty runs. */
171 		arena_runs_dirty_link_t		rd;
172 
173 		/* Profile counters, used for large object runs. */
174 		union {
175 			void			*prof_tctx_pun;
176 			prof_tctx_t		*prof_tctx;
177 		};
178 
179 		/* Small region run metadata. */
180 		arena_run_t			run;
181 	};
182 };
183 typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
184 #endif /* JEMALLOC_ARENA_STRUCTS_A */
185 
186 #ifdef JEMALLOC_ARENA_STRUCTS_B
187 /* Arena chunk header. */
188 struct arena_chunk_s {
189 	/*
190 	 * A pointer to the arena that owns the chunk is stored within the node.
191 	 * This field as a whole is used by chunks_rtree to support both
192 	 * ivsalloc() and core-based debugging.
193 	 */
194 	extent_node_t		node;
195 
196 	/*
197 	 * Map of pages within chunk that keeps track of free/large/small.  The
198 	 * first map_bias entries are omitted, since the chunk header does not
199 	 * need to be tracked in the map.  This omission saves a header page
200 	 * for common chunk sizes (e.g. 4 MiB).
201 	 */
202 	arena_chunk_map_bits_t	map_bits[1]; /* Dynamically sized. */
203 };
204 
205 /*
206  * Read-only information associated with each element of arena_t's bins array
207  * is stored separately, partly to reduce memory usage (only one copy, rather
208  * than one per arena), but mainly to avoid false cacheline sharing.
209  *
210  * Each run has the following layout:
211  *
212  *               /--------------------\
213  *               | pad?               |
214  *               |--------------------|
215  *               | redzone            |
216  *   reg0_offset | region 0           |
217  *               | redzone            |
218  *               |--------------------| \
219  *               | redzone            | |
220  *               | region 1           |  > reg_interval
221  *               | redzone            | /
222  *               |--------------------|
223  *               | ...                |
224  *               | ...                |
225  *               | ...                |
226  *               |--------------------|
227  *               | redzone            |
228  *               | region nregs-1     |
229  *               | redzone            |
230  *               |--------------------|
231  *               | alignment pad?     |
232  *               \--------------------/
233  *
234  * reg_interval has at least the same minimum alignment as reg_size; this
235  * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
236  * either 0 or redzone_size; it is present only if needed to align reg0_offset.
237  */
238 struct arena_bin_info_s {
239 	/* Size of regions in a run for this bin's size class. */
240 	size_t			reg_size;
241 
242 	/* Redzone size. */
243 	size_t			redzone_size;
244 
245 	/* Interval between regions (reg_size + (redzone_size << 1)). */
246 	size_t			reg_interval;
247 
248 	/* Total size of a run for this bin's size class. */
249 	size_t			run_size;
250 
251 	/* Total number of regions in a run for this bin's size class. */
252 	uint32_t		nregs;
253 
254 	/*
255 	 * Metadata used to manipulate bitmaps for runs associated with this
256 	 * bin.
257 	 */
258 	bitmap_info_t		bitmap_info;
259 
260 	/* Offset of first region in a run for this bin's size class. */
261 	uint32_t		reg0_offset;
262 };
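/*
 * For example, a hypothetical bin with reg_size == 32 and redzones at the
 * minimum size (redzone_size == REDZONE_MINSIZE == 16) has
 * reg_interval == 32 + (16 << 1) == 64, so a one-page (4 KiB) run for that
 * bin holds at most 4096 / 64 == 64 regions.
 */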
263 
264 struct arena_bin_s {
265 	/*
266 	 * All operations on runcur, runs, and stats require that lock be
267 	 * locked.  Run allocation/deallocation are protected by the arena lock,
268 	 * which may be acquired while holding one or more bin locks, but not
269 	 * vice versa.
270 	 */
271 	malloc_mutex_t		lock;
272 
273 	/*
274 	 * Current run being used to service allocations of this bin's size
275 	 * class.
276 	 */
277 	arena_run_t		*runcur;
278 
279 	/*
280 	 * Tree of non-full runs.  This tree is used when looking for an
281 	 * existing run when runcur is no longer usable.  We choose the
282 	 * non-full run that is lowest in memory; this policy tends to keep
283 	 * objects packed well, and it can also help reduce the number of
284 	 * almost-empty chunks.
285 	 */
286 	arena_run_tree_t	runs;
287 
288 	/* Bin statistics. */
289 	malloc_bin_stats_t	stats;
290 };
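/*
 * Lock-ordering sketch for the bin/arena locks described above: a thread may
 * acquire arena->lock while already holding bin->lock (bin -> arena), but
 * never the reverse; taking a bin lock while holding arena->lock would risk
 * deadlock.
 */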
291 
292 struct arena_s {
293 	/* This arena's index within the arenas array. */
294 	unsigned		ind;
295 
296 	/*
297 	 * Number of threads currently assigned to this arena.  This field is
298 	 * synchronized via atomic operations.
299 	 */
300 	unsigned		nthreads;
301 
302 	/*
303 	 * There are three classes of arena operations from a locking
304 	 * perspective:
305 	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
306 	 * 2) Bin-related operations are protected by bin locks.
307 	 * 3) Chunk- and run-related operations are protected by this mutex.
308 	 */
309 	malloc_mutex_t		lock;
310 
311 	arena_stats_t		stats;
312 	/*
313 	 * List of tcaches for extant threads associated with this arena.
314 	 * Stats from these are merged incrementally, and at exit if
315 	 * opt_stats_print is enabled.
316 	 */
317 	ql_head(tcache_t)	tcache_ql;
318 
319 	uint64_t		prof_accumbytes;
320 
321 	/*
322 	 * PRNG state for cache index randomization of large allocation base
323 	 * pointers.
324 	 */
325 	uint64_t		offset_state;
326 
327 	dss_prec_t		dss_prec;
328 
329 	/*
330 	 * In order to avoid rapid chunk allocation/deallocation when an arena
331 	 * oscillates right on the cusp of needing a new chunk, cache the most
332 	 * recently freed chunk.  The spare is left in the arena's chunk trees
333 	 * until it is deleted.
334 	 *
335 	 * There is one spare chunk per arena, rather than one spare total, in
336 	 * order to avoid interactions between multiple threads that could make
337 	 * a single spare inadequate.
338 	 */
339 	arena_chunk_t		*spare;
340 
341 	/* Minimum ratio (log base 2) of nactive:ndirty. */
342 	ssize_t			lg_dirty_mult;
343 
344 	/* True if a thread is currently executing arena_purge_to_limit(). */
345 	bool			purging;
346 
347 	/* Number of pages in active runs and huge regions. */
348 	size_t			nactive;
349 
350 	/*
351 	 * Current count of pages within unused runs that are potentially
352 	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
353 	 * By tracking this, we can institute a limit on how much dirty unused
354 	 * memory is mapped for each arena.
355 	 */
356 	size_t			ndirty;
357 
358 	/*
359 	 * Unused dirty memory this arena manages.  Dirty memory is conceptually
360 	 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
361 	 * chunks, but the list linkage is actually semi-duplicated in order to
362 	 * avoid extra arena_chunk_map_misc_t space overhead.
363 	 *
364 	 *   LRU-----------------------------------------------------------MRU
365 	 *
366 	 *        /-- arena ---\
367 	 *        |            |
368 	 *        |            |
369 	 *        |------------|                             /- chunk -\
370 	 *   ...->|chunks_cache|<--------------------------->|  /----\ |<--...
371 	 *        |------------|                             |  |node| |
372 	 *        |            |                             |  |    | |
373 	 *        |            |    /- run -\    /- run -\   |  |    | |
374 	 *        |            |    |       |    |       |   |  |    | |
375 	 *        |            |    |       |    |       |   |  |    | |
376 	 *        |------------|    |-------|    |-------|   |  |----| |
377 	 *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd  |<----...
378 	 *        |------------|    |-------|    |-------|   |  |----| |
379 	 *        |            |    |       |    |       |   |  |    | |
380 	 *        |            |    |       |    |       |   |  \----/ |
381 	 *        |            |    \-------/    \-------/   |         |
382 	 *        |            |                             |         |
383 	 *        |            |                             |         |
384 	 *        \------------/                             \---------/
385 	 */
386 	arena_runs_dirty_link_t	runs_dirty;
387 	extent_node_t		chunks_cache;
388 
389 	/*
390 	 * Approximate time in seconds from the creation of a set of unused
391 	 * dirty pages until an equivalent set of unused dirty pages is purged
392 	 * and/or reused.
393 	 */
394 	ssize_t			decay_time;
395 	/* decay_time / SMOOTHSTEP_NSTEPS. */
396 	nstime_t		decay_interval;
397 	/*
398 	 * Time at which the current decay interval logically started.  We do
399 	 * not actually advance to a new epoch until sometime after it starts
400 	 * because of scheduling and computation delays, and it is even possible
401 	 * to completely skip epochs.  In all cases, during epoch advancement we
402 	 * merge all relevant activity into the most recently recorded epoch.
403 	 */
404 	nstime_t		decay_epoch;
405 	/* decay_deadline randomness generator. */
406 	uint64_t		decay_jitter_state;
407 	/*
408 	 * Deadline for current epoch.  This is the sum of decay_interval and
409 	 * per epoch jitter which is a uniform random variable in
410 	 * [0..decay_interval).  Epochs always advance by precise multiples of
411 	 * decay_interval, but we randomize the deadline to reduce the
412 	 * likelihood of arenas purging in lockstep.
413 	 */
414 	nstime_t		decay_deadline;
415 	/*
416 	 * Number of dirty pages at beginning of current epoch.  During epoch
417 	 * advancement we use the delta between decay_ndirty and ndirty to
418 	 * determine how many dirty pages, if any, were generated, and record
419 	 * the result in decay_backlog.
420 	 */
421 	size_t			decay_ndirty;
422 	/*
423 	 * Memoized result of arena_decay_backlog_npages_limit() corresponding
424 	 * to the current contents of decay_backlog, i.e. the limit on how many
425 	 * pages are allowed to exist for the decay epochs.
426 	 */
427 	size_t			decay_backlog_npages_limit;
428 	/*
429 	 * Trailing log of how many unused dirty pages were generated during
430 	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
431 	 * element is the most recent epoch.  Corresponding epoch times are
432 	 * relative to decay_epoch.
433 	 */
434 	size_t			decay_backlog[SMOOTHSTEP_NSTEPS];
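	/*
	 * Illustrative timing, assuming SMOOTHSTEP_NSTEPS == 200: with
	 * decay_time == 60 s, decay_interval is 0.3 s, so dirty pages created
	 * now are purged gradually over the following ~60 s (200 epochs)
	 * according to the smoothstep curve.  With the Android default
	 * decay_time of 0, the limit is always 0 and dirty pages are purged
	 * immediately.
	 */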
435 
436 	/* Extant huge allocations. */
437 	ql_head(extent_node_t)	huge;
438 	/* Synchronizes all huge allocation/update/deallocation. */
439 	malloc_mutex_t		huge_mtx;
440 
441 	/*
442 	 * Trees of chunks that were previously allocated (trees differ only in
443 	 * node ordering).  These are used when allocating chunks, in an attempt
444 	 * to re-use address space.  Depending on function, different tree
445 	 * orderings are needed, which is why there are two trees with the same
446 	 * contents.
447 	 */
448 	extent_tree_t		chunks_szad_cached;
449 	extent_tree_t		chunks_ad_cached;
450 	extent_tree_t		chunks_szad_retained;
451 	extent_tree_t		chunks_ad_retained;
452 
453 	malloc_mutex_t		chunks_mtx;
454 	/* Cache of nodes that were allocated via base_alloc(). */
455 	ql_head(extent_node_t)	node_cache;
456 	malloc_mutex_t		node_cache_mtx;
457 
458 	/* User-configurable chunk hook functions. */
459 	chunk_hooks_t		chunk_hooks;
460 
461 	/* bins is used to store trees of free regions. */
462 	arena_bin_t		bins[NBINS];
463 
464 	/*
465 	 * Quantized address-ordered trees of this arena's available runs.  The
466 	 * trees are used for first-best-fit run allocation.
467 	 */
468 	arena_run_tree_t	runs_avail[1]; /* Dynamically sized. */
469 };
470 
471 /* Used in conjunction with tsd for fast arena-related context lookup. */
472 struct arena_tdata_s {
473 	ticker_t		decay_ticker;
474 };
475 #endif /* JEMALLOC_ARENA_STRUCTS_B */
476 
477 #endif /* JEMALLOC_H_STRUCTS */
478 /******************************************************************************/
479 #ifdef JEMALLOC_H_EXTERNS
480 
481 static const size_t	large_pad =
482 #ifdef JEMALLOC_CACHE_OBLIVIOUS
483     PAGE
484 #else
485     0
486 #endif
487     ;
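/*
 * When JEMALLOC_CACHE_OBLIVIOUS is defined, every large run carries one page
 * of padding so that the allocation's base pointer can be randomized within
 * that page (see offset_state above).  For example, assuming 4 KiB pages, a
 * 16 KiB large allocation occupies a 20 KiB run, and arena_salloc() subtracts
 * large_pad to report the usable 16 KiB.
 */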
488 
489 extern purge_mode_t	opt_purge;
490 extern const char	*purge_mode_names[];
491 extern ssize_t		opt_lg_dirty_mult;
492 extern ssize_t		opt_decay_time;
493 
494 extern arena_bin_info_t	arena_bin_info[NBINS];
495 
496 extern size_t		map_bias; /* Number of arena chunk header pages. */
497 extern size_t		map_misc_offset;
498 extern size_t		arena_maxrun; /* Max run size for arenas. */
499 extern size_t		large_maxclass; /* Max large size class. */
500 extern size_t		run_quantize_max; /* Max run_quantize_*() input. */
501 extern unsigned		nlclasses; /* Number of large size classes. */
502 extern unsigned		nhclasses; /* Number of huge size classes. */
503 
504 #ifdef JEMALLOC_JET
505 typedef size_t (run_quantize_t)(size_t);
506 extern run_quantize_t *run_quantize_floor;
507 extern run_quantize_t *run_quantize_ceil;
508 #endif
509 void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
510     bool cache);
511 void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
512     bool cache);
513 extent_node_t	*arena_node_alloc(arena_t *arena);
514 void	arena_node_dalloc(arena_t *arena, extent_node_t *node);
515 void	*arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
516     bool *zero);
517 void	arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
518 void	arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
519     size_t oldsize, size_t usize);
520 void	arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
521     size_t oldsize, size_t usize);
522 bool	arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
523     size_t oldsize, size_t usize, bool *zero);
524 ssize_t	arena_lg_dirty_mult_get(arena_t *arena);
525 bool	arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
526 ssize_t	arena_decay_time_get(arena_t *arena);
527 bool	arena_decay_time_set(arena_t *arena, ssize_t decay_time);
528 void	arena_maybe_purge(arena_t *arena);
529 void	arena_purge(arena_t *arena, bool all);
530 void	arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
531     szind_t binind, uint64_t prof_accumbytes);
532 void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
533     bool zero);
534 #ifdef JEMALLOC_JET
535 typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
536     uint8_t);
537 extern arena_redzone_corruption_t *arena_redzone_corruption;
538 typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
539 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
540 #else
541 void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
542 #endif
543 void	arena_quarantine_junk_small(void *ptr, size_t usize);
544 void	*arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
545 void	*arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
546     bool zero, tcache_t *tcache);
547 void	*arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
548     size_t alignment, bool zero, tcache_t *tcache);
549 void	arena_prof_promoted(const void *ptr, size_t size);
550 void	arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
551     void *ptr, arena_chunk_map_bits_t *bitselm);
552 void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
553     size_t pageind, arena_chunk_map_bits_t *bitselm);
554 void	arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
555     void *ptr, size_t pageind);
556 #ifdef JEMALLOC_JET
557 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
558 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
559 #else
560 void	arena_dalloc_junk_large(void *ptr, size_t usize);
561 #endif
562 void	arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
563     void *ptr);
564 void	arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
565     void *ptr);
566 #ifdef JEMALLOC_JET
567 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
568 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
569 #endif
570 bool	arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
571     size_t extra, bool zero);
572 void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
573     size_t size, size_t alignment, bool zero, tcache_t *tcache);
574 dss_prec_t	arena_dss_prec_get(arena_t *arena);
575 bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
576 ssize_t	arena_lg_dirty_mult_default_get(void);
577 bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
578 ssize_t	arena_decay_time_default_get(void);
579 bool	arena_decay_time_default_set(ssize_t decay_time);
580 void	arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
581     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
582     size_t *nactive, size_t *ndirty);
583 void	arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
584     ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
585     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
586     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
587 unsigned	arena_nthreads_get(arena_t *arena);
588 void	arena_nthreads_inc(arena_t *arena);
589 void	arena_nthreads_dec(arena_t *arena);
590 arena_t	*arena_new(unsigned ind);
591 bool	arena_boot(void);
592 void	arena_prefork(arena_t *arena);
593 void	arena_postfork_parent(arena_t *arena);
594 void	arena_postfork_child(arena_t *arena);
595 
596 #endif /* JEMALLOC_H_EXTERNS */
597 /******************************************************************************/
598 #ifdef JEMALLOC_H_INLINES
599 
600 #ifndef JEMALLOC_ENABLE_INLINE
601 arena_chunk_map_bits_t	*arena_bitselm_get(arena_chunk_t *chunk,
602     size_t pageind);
603 arena_chunk_map_misc_t	*arena_miscelm_get(arena_chunk_t *chunk,
604     size_t pageind);
605 size_t	arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
606 void	*arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
607 arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
608 arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
609 size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
610 size_t	arena_mapbitsp_read(size_t *mapbitsp);
611 size_t	arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
612 size_t	arena_mapbits_size_decode(size_t mapbits);
613 size_t	arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
614     size_t pageind);
615 size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
616 size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
617 szind_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
618 size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
619 size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
620 size_t	arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
621 size_t	arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
622 size_t	arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
623 void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
624 size_t	arena_mapbits_size_encode(size_t size);
625 void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
626     size_t size, size_t flags);
627 void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
628     size_t size);
629 void	arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
630     size_t flags);
631 void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
632     size_t size, size_t flags);
633 void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
634     szind_t binind);
635 void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
636     size_t runind, szind_t binind, size_t flags);
637 void	arena_metadata_allocated_add(arena_t *arena, size_t size);
638 void	arena_metadata_allocated_sub(arena_t *arena, size_t size);
639 size_t	arena_metadata_allocated_get(arena_t *arena);
640 bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
641 bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
642 bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
643 szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
644 szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
645 size_t	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
646     const void *ptr);
647 prof_tctx_t	*arena_prof_tctx_get(const void *ptr);
648 void	arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
649 void	arena_prof_tctx_reset(const void *ptr, size_t usize,
650     const void *old_ptr, prof_tctx_t *old_tctx);
651 void	arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
652 void	arena_decay_tick(tsd_t *tsd, arena_t *arena);
653 void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
654     bool zero, tcache_t *tcache, bool slow_path);
655 arena_t	*arena_aalloc(const void *ptr);
656 size_t	arena_salloc(const void *ptr, bool demote);
657 void	arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
658 void	arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
659 #endif
660 
661 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
662 #  ifdef JEMALLOC_ARENA_INLINE_A
663 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
664 arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
665 {
666 
667 	assert(pageind >= map_bias);
668 	assert(pageind < chunk_npages);
669 
670 	return (&chunk->map_bits[pageind-map_bias]);
671 }
672 
673 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
674 arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
675 {
676 
677 	assert(pageind >= map_bias);
678 	assert(pageind < chunk_npages);
679 
680 	return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
681 	    (uintptr_t)map_misc_offset) + pageind-map_bias);
682 }
683 
684 JEMALLOC_ALWAYS_INLINE size_t
685 arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
686 {
687 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
688 	size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
689 	    map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
690 
691 	assert(pageind >= map_bias);
692 	assert(pageind < chunk_npages);
693 
694 	return (pageind);
695 }
696 
697 JEMALLOC_ALWAYS_INLINE void *
698 arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
699 {
700 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
701 	size_t pageind = arena_miscelm_to_pageind(miscelm);
702 
703 	return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
704 }
705 
706 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
707 arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
708 {
709 	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
710 	    *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
711 
712 	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
713 	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
714 
715 	return (miscelm);
716 }
717 
718 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
719 arena_run_to_miscelm(arena_run_t *run)
720 {
721 	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
722 	    *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
723 
724 	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
725 	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
726 
727 	return (miscelm);
728 }
729 
730 JEMALLOC_ALWAYS_INLINE size_t *
731 arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
732 {
733 
734 	return (&arena_bitselm_get(chunk, pageind)->bits);
735 }
736 
737 JEMALLOC_ALWAYS_INLINE size_t
738 arena_mapbitsp_read(size_t *mapbitsp)
739 {
740 
741 	return (*mapbitsp);
742 }
743 
744 JEMALLOC_ALWAYS_INLINE size_t
745 arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
746 {
747 
748 	return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
749 }
750 
751 JEMALLOC_ALWAYS_INLINE size_t
752 arena_mapbits_size_decode(size_t mapbits)
753 {
754 	size_t size;
755 
756 #if CHUNK_MAP_SIZE_SHIFT > 0
757 	size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
758 #elif CHUNK_MAP_SIZE_SHIFT == 0
759 	size = mapbits & CHUNK_MAP_SIZE_MASK;
760 #else
761 	size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
762 #endif
763 
764 	return (size);
765 }
766 
767 JEMALLOC_ALWAYS_INLINE size_t
768 arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
769 {
770 	size_t mapbits;
771 
772 	mapbits = arena_mapbits_get(chunk, pageind);
773 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
774 	return (arena_mapbits_size_decode(mapbits));
775 }
776 
777 JEMALLOC_ALWAYS_INLINE size_t
778 arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
779 {
780 	size_t mapbits;
781 
782 	mapbits = arena_mapbits_get(chunk, pageind);
783 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
784 	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
785 	return (arena_mapbits_size_decode(mapbits));
786 }
787 
788 JEMALLOC_ALWAYS_INLINE size_t
789 arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
790 {
791 	size_t mapbits;
792 
793 	mapbits = arena_mapbits_get(chunk, pageind);
794 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
795 	    CHUNK_MAP_ALLOCATED);
796 	return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
797 }
798 
799 JEMALLOC_ALWAYS_INLINE szind_t
800 arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
801 {
802 	size_t mapbits;
803 	szind_t binind;
804 
805 	mapbits = arena_mapbits_get(chunk, pageind);
806 	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
807 	assert(binind < NBINS || binind == BININD_INVALID);
808 	return (binind);
809 }
810 
811 JEMALLOC_ALWAYS_INLINE size_t
812 arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
813 {
814 	size_t mapbits;
815 
816 	mapbits = arena_mapbits_get(chunk, pageind);
817 	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
818 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
819 	return (mapbits & CHUNK_MAP_DIRTY);
820 }
821 
822 JEMALLOC_ALWAYS_INLINE size_t
823 arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
824 {
825 	size_t mapbits;
826 
827 	mapbits = arena_mapbits_get(chunk, pageind);
828 	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
829 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
830 	return (mapbits & CHUNK_MAP_UNZEROED);
831 }
832 
833 JEMALLOC_ALWAYS_INLINE size_t
834 arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
835 {
836 	size_t mapbits;
837 
838 	mapbits = arena_mapbits_get(chunk, pageind);
839 	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
840 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
841 	return (mapbits & CHUNK_MAP_DECOMMITTED);
842 }
843 
844 JEMALLOC_ALWAYS_INLINE size_t
845 arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
846 {
847 	size_t mapbits;
848 
849 	mapbits = arena_mapbits_get(chunk, pageind);
850 	return (mapbits & CHUNK_MAP_LARGE);
851 }
852 
853 JEMALLOC_ALWAYS_INLINE size_t
854 arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
855 {
856 	size_t mapbits;
857 
858 	mapbits = arena_mapbits_get(chunk, pageind);
859 	return (mapbits & CHUNK_MAP_ALLOCATED);
860 }
861 
862 JEMALLOC_ALWAYS_INLINE void
863 arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
864 {
865 
866 	*mapbitsp = mapbits;
867 }
868 
869 JEMALLOC_ALWAYS_INLINE size_t
870 arena_mapbits_size_encode(size_t size)
871 {
872 	size_t mapbits;
873 
874 #if CHUNK_MAP_SIZE_SHIFT > 0
875 	mapbits = size << CHUNK_MAP_SIZE_SHIFT;
876 #elif CHUNK_MAP_SIZE_SHIFT == 0
877 	mapbits = size;
878 #else
879 	mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
880 #endif
881 
882 	assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
883 	return (mapbits);
884 }
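/*
 * Worked example, assuming LG_PAGE == 12 so that CHUNK_MAP_SIZE_SHIFT == 1: a
 * 3-page (0x3000 byte) unallocated run stores 0x3000 << 1 == 0x6000 in the
 * size bits, and arena_mapbits_size_decode() shifts it back down to 0x3000.
 */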
885 
886 JEMALLOC_ALWAYS_INLINE void
887 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
888     size_t flags)
889 {
890 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
891 
892 	assert((size & PAGE_MASK) == 0);
893 	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
894 	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
895 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
896 	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
897 	    CHUNK_MAP_BININD_INVALID | flags);
898 }
899 
900 JEMALLOC_ALWAYS_INLINE void
901 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
902     size_t size)
903 {
904 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
905 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
906 
907 	assert((size & PAGE_MASK) == 0);
908 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
909 	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
910 	    (mapbits & ~CHUNK_MAP_SIZE_MASK));
911 }
912 
913 JEMALLOC_ALWAYS_INLINE void
914 arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
915 {
916 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
917 
918 	assert((flags & CHUNK_MAP_UNZEROED) == flags);
919 	arena_mapbitsp_write(mapbitsp, flags);
920 }
921 
922 JEMALLOC_ALWAYS_INLINE void
923 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
924     size_t flags)
925 {
926 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
927 
928 	assert((size & PAGE_MASK) == 0);
929 	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
930 	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
931 	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
932 	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
933 	    CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
934 	    CHUNK_MAP_ALLOCATED);
935 }
936 
937 JEMALLOC_ALWAYS_INLINE void
938 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
939     szind_t binind)
940 {
941 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
942 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
943 
944 	assert(binind <= BININD_INVALID);
945 	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
946 	    large_pad);
947 	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
948 	    (binind << CHUNK_MAP_BININD_SHIFT));
949 }
950 
951 JEMALLOC_ALWAYS_INLINE void
952 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
953     szind_t binind, size_t flags)
954 {
955 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
956 
957 	assert(binind < BININD_INVALID);
958 	assert(pageind - runind >= map_bias);
959 	assert((flags & CHUNK_MAP_UNZEROED) == flags);
960 	arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
961 	    (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
962 }
963 
964 JEMALLOC_INLINE void
965 arena_metadata_allocated_add(arena_t *arena, size_t size)
966 {
967 
968 	atomic_add_z(&arena->stats.metadata_allocated, size);
969 }
970 
971 JEMALLOC_INLINE void
972 arena_metadata_allocated_sub(arena_t *arena, size_t size)
973 {
974 
975 	atomic_sub_z(&arena->stats.metadata_allocated, size);
976 }
977 
978 JEMALLOC_INLINE size_t
979 arena_metadata_allocated_get(arena_t *arena)
980 {
981 
982 	return (atomic_read_z(&arena->stats.metadata_allocated));
983 }
984 
985 JEMALLOC_INLINE bool
986 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
987 {
988 
989 	cassert(config_prof);
990 	assert(prof_interval != 0);
991 
992 	arena->prof_accumbytes += accumbytes;
993 	if (arena->prof_accumbytes >= prof_interval) {
994 		arena->prof_accumbytes -= prof_interval;
995 		return (true);
996 	}
997 	return (false);
998 }
999 
1000 JEMALLOC_INLINE bool
1001 arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
1002 {
1003 
1004 	cassert(config_prof);
1005 
1006 	if (likely(prof_interval == 0))
1007 		return (false);
1008 	return (arena_prof_accum_impl(arena, accumbytes));
1009 }
1010 
1011 JEMALLOC_INLINE bool
1012 arena_prof_accum(arena_t *arena, uint64_t accumbytes)
1013 {
1014 
1015 	cassert(config_prof);
1016 
1017 	if (likely(prof_interval == 0))
1018 		return (false);
1019 
1020 	{
1021 		bool ret;
1022 
1023 		malloc_mutex_lock(&arena->lock);
1024 		ret = arena_prof_accum_impl(arena, accumbytes);
1025 		malloc_mutex_unlock(&arena->lock);
1026 		return (ret);
1027 	}
1028 }
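/*
 * For example, if prof_interval were 1 MiB, two successive 600 KiB
 * accumulations would return false and then true, leaving 176 KiB in
 * prof_accumbytes toward the next sample interval.
 */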
1029 
1030 JEMALLOC_ALWAYS_INLINE szind_t
1031 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
1032 {
1033 	szind_t binind;
1034 
1035 	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
1036 
1037 	if (config_debug) {
1038 		arena_chunk_t *chunk;
1039 		arena_t *arena;
1040 		size_t pageind;
1041 		size_t actual_mapbits;
1042 		size_t rpages_ind;
1043 		arena_run_t *run;
1044 		arena_bin_t *bin;
1045 		szind_t run_binind, actual_binind;
1046 		arena_bin_info_t *bin_info;
1047 		arena_chunk_map_misc_t *miscelm;
1048 		void *rpages;
1049 
1050 		assert(binind != BININD_INVALID);
1051 		assert(binind < NBINS);
1052 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1053 		arena = extent_node_arena_get(&chunk->node);
1054 		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1055 		actual_mapbits = arena_mapbits_get(chunk, pageind);
1056 		assert(mapbits == actual_mapbits);
1057 		assert(arena_mapbits_large_get(chunk, pageind) == 0);
1058 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1059 		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
1060 		    pageind);
1061 		miscelm = arena_miscelm_get(chunk, rpages_ind);
1062 		run = &miscelm->run;
1063 		run_binind = run->binind;
1064 		bin = &arena->bins[run_binind];
1065 		actual_binind = (szind_t)(bin - arena->bins);
1066 		assert(run_binind == actual_binind);
1067 		bin_info = &arena_bin_info[actual_binind];
1068 		rpages = arena_miscelm_to_rpages(miscelm);
1069 		assert(((uintptr_t)ptr - ((uintptr_t)rpages +
1070 		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
1071 		    == 0);
1072 	}
1073 
1074 	return (binind);
1075 }
1076 #  endif /* JEMALLOC_ARENA_INLINE_A */
1077 
1078 #  ifdef JEMALLOC_ARENA_INLINE_B
1079 JEMALLOC_INLINE szind_t
1080 arena_bin_index(arena_t *arena, arena_bin_t *bin)
1081 {
1082 	szind_t binind = (szind_t)(bin - arena->bins);
1083 	assert(binind < NBINS);
1084 	return (binind);
1085 }
1086 
1087 JEMALLOC_INLINE size_t
1088 arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
1089 {
1090 	size_t diff, interval, shift, regind;
1091 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1092 	void *rpages = arena_miscelm_to_rpages(miscelm);
1093 
1094 	/*
1095 	 * Freeing a pointer lower than region zero can cause assertion
1096 	 * failure.
1097 	 */
1098 	assert((uintptr_t)ptr >= (uintptr_t)rpages +
1099 	    (uintptr_t)bin_info->reg0_offset);
1100 
1101 	/*
1102 	 * Avoid doing division with a variable divisor if possible.  Using
1103 	 * actual division here can reduce allocator throughput by over 20%!
1104 	 */
1105 	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
1106 	    bin_info->reg0_offset);
1107 
1108 	/* Rescale (factor powers of 2 out of the numerator and denominator). */
1109 	interval = bin_info->reg_interval;
1110 	shift = ffs_zu(interval) - 1;
1111 	diff >>= shift;
1112 	interval >>= shift;
1113 
1114 	if (interval == 1) {
1115 		/* The divisor was a power of 2. */
1116 		regind = diff;
1117 	} else {
1118 		/*
1119 		 * To divide by a number D that is not a power of two we multiply by
1120 		 * (2^SIZE_INV_SHIFT / D) and then right shift by SIZE_INV_SHIFT positions.
1121 		 *
1122 		 *   X / D
1123 		 *
1124 		 * becomes
1125 		 *
1126 		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
1127 		 *
1128 		 * We can omit the first three elements, because we never
1129 		 * divide by 0, and 1 and 2 are both powers of two, which are
1130 		 * handled above.
1131 		 */
1132 #define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
1133 #define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
1134 		static const size_t interval_invs[] = {
1135 		    SIZE_INV(3),
1136 		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
1137 		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
1138 		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
1139 		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
1140 		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
1141 		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
1142 		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
1143 		};
1144 
1145 		if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
1146 		    + 2))) {
1147 			regind = (diff * interval_invs[interval - 3]) >>
1148 			    SIZE_INV_SHIFT;
1149 		} else
1150 			regind = diff / interval;
1151 #undef SIZE_INV
1152 #undef SIZE_INV_SHIFT
1153 	}
1154 	assert(diff == regind * interval);
1155 	assert(regind < bin_info->nregs);
1156 
1157 	return (regind);
1158 }
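/*
 * Worked example for a hypothetical bin with reg_interval == 48 and a pointer
 * 480 bytes past region 0: ffs_zu(48) - 1 == 4, so diff becomes 30 and
 * interval becomes 3; regind == (30 * SIZE_INV(3)) >> SIZE_INV_SHIFT == 10,
 * matching 480 / 48.
 */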
1159 
1160 JEMALLOC_INLINE prof_tctx_t *
1161 arena_prof_tctx_get(const void *ptr)
1162 {
1163 	prof_tctx_t *ret;
1164 	arena_chunk_t *chunk;
1165 
1166 	cassert(config_prof);
1167 	assert(ptr != NULL);
1168 
1169 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1170 	if (likely(chunk != ptr)) {
1171 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1172 		size_t mapbits = arena_mapbits_get(chunk, pageind);
1173 		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
1174 		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
1175 			ret = (prof_tctx_t *)(uintptr_t)1U;
1176 		else {
1177 			arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
1178 			    pageind);
1179 			ret = atomic_read_p(&elm->prof_tctx_pun);
1180 		}
1181 	} else
1182 		ret = huge_prof_tctx_get(ptr);
1183 
1184 	return (ret);
1185 }
1186 
1187 JEMALLOC_INLINE void
1188 arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
1189 {
1190 	arena_chunk_t *chunk;
1191 
1192 	cassert(config_prof);
1193 	assert(ptr != NULL);
1194 
1195 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1196 	if (likely(chunk != ptr)) {
1197 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1198 
1199 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1200 
1201 		if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
1202 		    (uintptr_t)1U)) {
1203 			arena_chunk_map_misc_t *elm;
1204 
1205 			assert(arena_mapbits_large_get(chunk, pageind) != 0);
1206 
1207 			elm = arena_miscelm_get(chunk, pageind);
1208 			atomic_write_p(&elm->prof_tctx_pun, tctx);
1209 		} else {
1210 			/*
1211 			 * tctx must always be initialized for large runs.
1212 			 * Assert that the surrounding conditional logic is
1213 			 * equivalent to checking whether ptr refers to a large
1214 			 * run.
1215 			 */
1216 			assert(arena_mapbits_large_get(chunk, pageind) == 0);
1217 		}
1218 	} else
1219 		huge_prof_tctx_set(ptr, tctx);
1220 }
1221 
1222 JEMALLOC_INLINE void
1223 arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
1224     prof_tctx_t *old_tctx)
1225 {
1226 
1227 	cassert(config_prof);
1228 	assert(ptr != NULL);
1229 
1230 	if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
1231 	    (uintptr_t)old_tctx > (uintptr_t)1U))) {
1232 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1233 		if (likely(chunk != ptr)) {
1234 			size_t pageind;
1235 			arena_chunk_map_misc_t *elm;
1236 
1237 			pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1238 			    LG_PAGE;
1239 			assert(arena_mapbits_allocated_get(chunk, pageind) !=
1240 			    0);
1241 			assert(arena_mapbits_large_get(chunk, pageind) != 0);
1242 
1243 			elm = arena_miscelm_get(chunk, pageind);
1244 			atomic_write_p(&elm->prof_tctx_pun,
1245 			    (prof_tctx_t *)(uintptr_t)1U);
1246 		} else
1247 			huge_prof_tctx_reset(ptr);
1248 	}
1249 }
1250 
1251 JEMALLOC_ALWAYS_INLINE void
1252 arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
1253 {
1254 	ticker_t *decay_ticker;
1255 
1256 	if (unlikely(tsd == NULL))
1257 		return;
1258 	decay_ticker = decay_ticker_get(tsd, arena->ind);
1259 	if (unlikely(decay_ticker == NULL))
1260 		return;
1261 	if (unlikely(ticker_ticks(decay_ticker, nticks)))
1262 		arena_purge(arena, false);
1263 }
1264 
1265 JEMALLOC_ALWAYS_INLINE void
1266 arena_decay_tick(tsd_t *tsd, arena_t *arena)
1267 {
1268 
1269 	arena_decay_ticks(tsd, arena, 1);
1270 }
1271 
1272 JEMALLOC_ALWAYS_INLINE void *
1273 arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
1274     tcache_t *tcache, bool slow_path)
1275 {
1276 
1277 	assert(size != 0);
1278 
1279 	if (likely(tcache != NULL)) {
1280 		if (likely(size <= SMALL_MAXCLASS)) {
1281 			return (tcache_alloc_small(tsd, arena, tcache, size,
1282 			    ind, zero, slow_path));
1283 		}
1284 		if (likely(size <= tcache_maxclass)) {
1285 			return (tcache_alloc_large(tsd, arena, tcache, size,
1286 			    ind, zero, slow_path));
1287 		}
1288 		/* (size > tcache_maxclass) case falls through. */
1289 		assert(size > tcache_maxclass);
1290 	}
1291 
1292 	return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
1293 }
1294 
1295 JEMALLOC_ALWAYS_INLINE arena_t *
1296 arena_aalloc(const void *ptr)
1297 {
1298 	arena_chunk_t *chunk;
1299 
1300 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1301 	if (likely(chunk != ptr))
1302 		return (extent_node_arena_get(&chunk->node));
1303 	else
1304 		return (huge_aalloc(ptr));
1305 }
1306 
1307 /* Return the size of the allocation pointed to by ptr. */
1308 JEMALLOC_ALWAYS_INLINE size_t
1309 arena_salloc(const void *ptr, bool demote)
1310 {
1311 	size_t ret;
1312 	arena_chunk_t *chunk;
1313 	size_t pageind;
1314 	szind_t binind;
1315 
1316 	assert(ptr != NULL);
1317 
1318 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1319 	if (likely(chunk != ptr)) {
1320 		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1321 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1322 		binind = arena_mapbits_binind_get(chunk, pageind);
1323 		if (unlikely(binind == BININD_INVALID || (config_prof && !demote
1324 		    && arena_mapbits_large_get(chunk, pageind) != 0))) {
1325 			/*
1326 			 * Large allocation.  In the common case (demote), and
1327 			 * as this is an inline function, most callers will only
1328 			 * end up looking at binind to determine that ptr is a
1329 			 * small allocation.
1330 			 */
1331 			assert(config_cache_oblivious || ((uintptr_t)ptr &
1332 			    PAGE_MASK) == 0);
1333 			ret = arena_mapbits_large_size_get(chunk, pageind) -
1334 			    large_pad;
1335 			assert(ret != 0);
1336 			assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
1337 			    chunk_npages);
1338 			assert(arena_mapbits_dirty_get(chunk, pageind) ==
1339 			    arena_mapbits_dirty_get(chunk,
1340 			    pageind+((ret+large_pad)>>LG_PAGE)-1));
1341 		} else {
1342 			/*
1343 			 * Small allocation (possibly promoted to a large
1344 			 * object).
1345 			 */
1346 			assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
1347 			    arena_ptr_small_binind_get(ptr,
1348 			    arena_mapbits_get(chunk, pageind)) == binind);
1349 			ret = index2size(binind);
1350 		}
1351 	} else
1352 		ret = huge_salloc(ptr);
1353 
1354 	return (ret);
1355 }
1356 
1357 JEMALLOC_ALWAYS_INLINE void
1358 arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
1359 {
1360 	arena_chunk_t *chunk;
1361 	size_t pageind, mapbits;
1362 
1363 	assert(ptr != NULL);
1364 
1365 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1366 	if (likely(chunk != ptr)) {
1367 		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1368 #if defined(__ANDROID__)
1369 		/* Verify the ptr is actually in the chunk. */
1370 		if (unlikely(pageind < map_bias || pageind >= chunk_npages)) {
1371 		    __libc_fatal("Invalid address %p passed to free: invalid page index", ptr);
1372 		}
1373 #endif
1374 		mapbits = arena_mapbits_get(chunk, pageind);
1375 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1376 #if defined(__ANDROID__)
1377 		/* Verify the ptr has been allocated. */
1378 		if (unlikely((mapbits & CHUNK_MAP_ALLOCATED) == 0)) {
1379 		    __libc_fatal("Invalid address %p passed to free: value not allocated", ptr);
1380 		}
1381 #endif
1382 		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
1383 			/* Small allocation. */
1384 			if (likely(tcache != NULL)) {
1385 				szind_t binind = arena_ptr_small_binind_get(ptr,
1386 				    mapbits);
1387 				tcache_dalloc_small(tsd, tcache, ptr, binind,
1388 				    slow_path);
1389 			} else {
1390 				arena_dalloc_small(tsd, extent_node_arena_get(
1391 				    &chunk->node), chunk, ptr, pageind);
1392 			}
1393 		} else {
1394 			size_t size = arena_mapbits_large_size_get(chunk,
1395 			    pageind);
1396 
1397 			assert(config_cache_oblivious || ((uintptr_t)ptr &
1398 			    PAGE_MASK) == 0);
1399 
1400 			if (likely(tcache != NULL) && size - large_pad <=
1401 			    tcache_maxclass) {
1402 				tcache_dalloc_large(tsd, tcache, ptr, size -
1403 				    large_pad, slow_path);
1404 			} else {
1405 				arena_dalloc_large(tsd, extent_node_arena_get(
1406 				    &chunk->node), chunk, ptr);
1407 			}
1408 		}
1409 	} else
1410 		huge_dalloc(tsd, ptr, tcache);
1411 }
1412 
1413 JEMALLOC_ALWAYS_INLINE void
1414 arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
1415 {
1416 	arena_chunk_t *chunk;
1417 
1418 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1419 	if (likely(chunk != ptr)) {
1420 		if (config_prof && opt_prof) {
1421 			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1422 			    LG_PAGE;
1423 			assert(arena_mapbits_allocated_get(chunk, pageind) !=
1424 			    0);
1425 			if (arena_mapbits_large_get(chunk, pageind) != 0) {
1426 				/*
1427 				 * Make sure to use promoted size, not request
1428 				 * size.
1429 				 */
1430 				size = arena_mapbits_large_size_get(chunk,
1431 				    pageind) - large_pad;
1432 			}
1433 		}
1434 		assert(s2u(size) == s2u(arena_salloc(ptr, false)));
1435 
1436 		if (likely(size <= SMALL_MAXCLASS)) {
1437 			/* Small allocation. */
1438 			if (likely(tcache != NULL)) {
1439 				szind_t binind = size2index(size);
1440 				tcache_dalloc_small(tsd, tcache, ptr, binind,
1441 				    true);
1442 			} else {
1443 				size_t pageind = ((uintptr_t)ptr -
1444 				    (uintptr_t)chunk) >> LG_PAGE;
1445 				arena_dalloc_small(tsd, extent_node_arena_get(
1446 				    &chunk->node), chunk, ptr, pageind);
1447 			}
1448 		} else {
1449 			assert(config_cache_oblivious || ((uintptr_t)ptr &
1450 			    PAGE_MASK) == 0);
1451 
1452 			if (likely(tcache != NULL) && size <= tcache_maxclass) {
1453 				tcache_dalloc_large(tsd, tcache, ptr, size,
1454 				    true);
1455 			} else {
1456 				arena_dalloc_large(tsd, extent_node_arena_get(
1457 				    &chunk->node), chunk, ptr);
1458 			}
1459 		}
1460 	} else
1461 		huge_dalloc(tsd, ptr, tcache);
1462 }
1463 #  endif /* JEMALLOC_ARENA_INLINE_B */
1464 #endif
1465 
1466 #endif /* JEMALLOC_H_INLINES */
1467 /******************************************************************************/
1468