/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
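
/*
 * Illustrative sketch (not part of the original header): because every state
 * value is <= TCACHE_STATE_MAX, a consumer can distinguish an encoded state
 * from a usable tcache pointer with a single comparison, e.g.:
 *
 *	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
 *		... tcache encodes state, not a usable cache ...
 *	}
 */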

/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define	TCACHE_NSLOTS_SMALL_MIN		20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the per size class limit of
 * twice the number of regions per run.
 *
 * This constant must be an even number.
 */
#if defined(ANDROID_TCACHE_NSLOTS_SMALL_MAX)
#define	TCACHE_NSLOTS_SMALL_MAX		ANDROID_TCACHE_NSLOTS_SMALL_MAX
#else
#define	TCACHE_NSLOTS_SMALL_MAX		200
#endif

/* Number of cache slots for large size classes. */
#if defined(ANDROID_TCACHE_NSLOTS_LARGE)
#define	TCACHE_NSLOTS_LARGE		ANDROID_TCACHE_NSLOTS_LARGE
#else
#define	TCACHE_NSLOTS_LARGE		20
#endif

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#if defined(ANDROID_LG_TCACHE_MAXCLASS_DEFAULT)
#define	LG_TCACHE_MAXCLASS_DEFAULT	ANDROID_LG_TCACHE_MAXCLASS_DEFAULT
#else
#define	LG_TCACHE_MAXCLASS_DEFAULT	15
#endif

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define	TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define	TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
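
/*
 * Worked example (NBINS is configuration dependent; 39 is just a plausible
 * value): with TCACHE_GC_SWEEP == 8192 and NBINS == 39, TCACHE_GC_INCR ==
 * 8192/39 + 1 == 211, so one bin is GCed roughly every 211 events and a full
 * sweep of all 39 bins takes 211 * 39 == 8229 events, slightly more than
 * TCACHE_GC_SWEEP, as the comment above anticipates.
 */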

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
	tcache_enabled_true    = 1,
	tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	/*
	 * To make use of adjacent cacheline prefetch, the items in the avail
	 * stack go to higher addresses for newer allocations.  avail points
	 * just above the available space, which means that
	 * avail[-ncached, ... -1] are the available items and the lowest item
	 * is allocated first.
	 */
	void		**avail;	/* Stack of available objects. */
};
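
/*
 * Illustrative layout (a sketch, not part of the original header): an avail
 * stack with ncached == 3, addresses increasing to the right, and avail
 * pointing just past the usable slots.
 *
 *	... unused ... | avail[-3] | avail[-2] | avail[-1] |  <- avail
 *	                 ^ popped first (lowest address)
 *
 * tcache_alloc_easy() below pops avail[-ncached], so successive allocations
 * walk toward higher addresses; tcache_dalloc_*() pushes a freed pointer back
 * into the new avail[-ncached] slot.
 */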

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
	ticker_t	gc_ticker;	/* Drives incremental GC. */
	szind_t		next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
	union {
		tcache_t	*tcache;
		tcaches_t	*next;
	};
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned	nhbins;

/* Maximum cached size class. */
extern size_t	tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t	*tcaches;
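
/*
 * Usage sketch (based on the mallctls named above; error handling mostly
 * omitted, and the local names tci/sz/p are illustrative only): creating an
 * explicit tcache, allocating and deallocating through it, then destroying it.
 *
 *	unsigned tci;
 *	size_t sz = sizeof(tci);
 *	if (mallctl("tcache.create", &tci, &sz, NULL, 0) == 0) {
 *		void *p = mallocx(42, MALLOCX_TCACHE(tci));
 *		dallocx(p, MALLOCX_TCACHE(tci));
 *		mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
 *	}
 */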

size_t	tcache_salloc(const void *ptr);
void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem);
void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void	tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
    arena_t *newarena);
void	tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
void	tcache_cleanup(tsd_t *tsd);
void	tcache_enabled_cleanup(tsd_t *tsd);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcaches_create(tsd_t *tsd, unsigned *r_ind);
void	tcaches_flush(tsd_t *tsd, unsigned ind);
void	tcaches_destroy(tsd_t *tsd, unsigned ind);
bool	tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	tcache_event(tsd_t *tsd, tcache_t *tcache);
void	tcache_flush(void);
bool	tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void	tcache_enabled_set(bool enabled);
void	*tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
    szind_t binind, bool slow_path);
void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
    size_t size, bool slow_path);
tcache_t	*tcaches_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE void
tcache_flush(void)
{
	tsd_t *tsd;

	cassert(config_tcache);

	tsd = tsd_fetch();
	tcache_cleanup(tsd);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tsd = tsd_fetch();
	tcache_enabled = tsd_tcache_enabled_get(tsd);
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tsd_tcache_enabled_set(tsd, tcache_enabled);
	}

	return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tsd = tsd_fetch();

	tcache_enabled = (tcache_enabled_t)enabled;
	tsd_tcache_enabled_set(tsd, tcache_enabled);

	if (!enabled)
		tcache_cleanup(tsd);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
	tcache_t *tcache;

	if (!config_tcache)
		return (NULL);

	tcache = tsd_tcache_get(tsd);
	if (!create)
		return (tcache);
	if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
		tcache = tcache_get_hard(tsd);
		tsd_tcache_set(tsd, tcache);
	}

	return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	if (unlikely(ticker_tick(&tcache->gc_ticker)))
		tcache_event_hard(tsd, tcache);
}
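
/*
 * Note (an observation, not from the original source): TCACHE_GC_INCR is a
 * compile-time constant, so when it evaluates to 0 (which happens only if
 * NBINS > TCACHE_GC_SWEEP) the early return above lets the compiler elide
 * incremental GC entirely.
 */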

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
	void *ret;

	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		*tcache_success = false;
		return (NULL);
	}
	/*
	 * tcache_success (rather than ret) should be checked upon return of
	 * this function.  We avoid checking (ret == NULL) because there is
	 * never a null stored on the avail stack (a fact unknown to the
	 * compiler), and eagerly checking ret would cause a pipeline stall
	 * (waiting for the cacheline to arrive).
	 */
	*tcache_success = true;
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;

	if (unlikely((int)tbin->ncached < tbin->low_water))
		tbin->low_water = tbin->ncached;

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
			return (NULL);

		ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind,
		    &tcache_hard_success);
		if (!tcache_hard_success)
			return (NULL);
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = index2size(binind);
		assert(tcache_salloc(ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += usize;
	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;

	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
			return (NULL);

		ret = arena_malloc_large(tsd, arena, binind, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand. */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (config_prof && usize == LARGE_MINCLASS) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc))
					memset(ret, 0xa5, usize);
				else if (unlikely(opt_zero))
					memset(ret, 0, usize);
			}
		} else
			memset(ret, 0, usize);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += usize;
	}

	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}
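
/*
 * Note (a reading of the code above, assuming the rem argument to
 * tcache_bin_flush_small() is the number of objects left cached after the
 * flush): on overflow the bin is flushed down to half capacity rather than
 * emptied, so a burst of frees does not forfeit the entire cache.
 */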

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
    bool slow_path)
{
	szind_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	binind = size2index(size);

	if (slow_path && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_large(ptr, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL))
		elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
	return (elm->tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/