#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/******************************************************************************/
/* Data. */

#if !defined(__BIONIC__) || defined(ANDROID_ENABLE_TCACHE)
bool	opt_tcache = true;
#else
bool	opt_tcache = false;
#endif
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

cache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

unsigned		nhbins;
size_t			tcache_maxclass;

tcaches_t		*tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned		tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t	*tcaches_avail;

/* Protects tcaches{,_past,_avail}. */
static malloc_mutex_t	tcaches_mtx;

/******************************************************************************/

size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
	return arena_salloc(tsdn, ptr);
}

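/*
 * Event-driven GC for the thread cache: each call operates on a single cache
 * bin, flushing part of what sits below its low water mark and adapting the
 * bin's fill count, then advances next_gc_bin round-robin over all nhbins
 * bins.
 */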
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
	szind_t binind = tcache->next_gc_bin;

	cache_bin_t *tbin;
	if (binind < NBINS) {
		tbin = tcache_small_bin_get(tcache, binind);
	} else {
		tbin = tcache_large_bin_get(tcache, binind);
	}
	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
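		/*
		 * Illustrative numbers (not from the original source): with
		 * ncached == 20 and low_water == 8, the remainder passed to
		 * the flush is 20 - 8 + (8 >> 2) == 14, so 6 of the 8 objects
		 * below the low water mark (exactly 3/4 of them) get flushed.
		 */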
		if (binind < NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
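			/*
			 * (Fill counts work out to ncached_max >> lg_fill_div,
			 * so incrementing lg_fill_div halves the next refill;
			 * the guard below keeps that count at least 1.)
			 */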
			cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
			if ((tbin_info->ncached_max >>
			     (tcache->lg_fill_div[binind] + 1)) >= 1) {
				tcache->lg_fill_div[binind]++;
			}
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X for small bins.  Make sure
		 * lg_fill_div stays greater than 0.
		 */
		if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
			tcache->lg_fill_div[binind]--;
		}
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins) {
		tcache->next_gc_bin = 0;
	}
}

void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
	void *ret;

	assert(tcache->arena != NULL);
	arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof) {
		tcache->prof_accumbytes = 0;
	}
	ret = cache_bin_alloc_easy(tbin, tcache_success);

	return ret;
}

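/*
 * Flush the small bin down to rem cached objects.  Objects go back to their
 * owning arenas in passes: each pass locks the bin (of index binind) in the
 * arena that owns the first remaining object, frees every object belonging to
 * that arena, and stashes the rest at the head of the avail stack for a later
 * pass.
 */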
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem) {
	bool merged_stats = false;

	assert(binind < NBINS);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
	/* Look up extent once per item. */
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

	while (nflush > 0) {
		/* Lock the arena bin associated with the first object. */
		extent_t *extent = item_extent[0];
		arena_t *bin_arena = extent_arena_get(extent);
		bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(tsd_tsdn(tsd), arena,
			    tcache->prof_accumbytes)) {
				prof_idump(tsd_tsdn(tsd));
			}
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (config_stats && bin_arena == arena) {
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
#if defined(ANDROID_ENABLE_TCACHE_STATS)
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
#endif
		}
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == bin_arena) {
				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
				    bin_arena, extent, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		bin->stats.nflushes++;
#if defined(ANDROID_ENABLE_TCACHE_STATS)
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
#endif
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

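/*
 * Large-object analogue of tcache_bin_flush_small: flush the large bin down
 * to rem cached objects, locking one owning arena's large_mtx per pass and
 * deferring objects owned by other arenas to later passes.
 */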
void
tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache) {
#if defined(ANDROID_ENABLE_TCACHE_STATS)
	bool merged_stats = false;
#endif

	assert(binind < nhbins);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
	/* Look up extent once per item. */
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

	while (nflush > 0) {
		/* Lock the arena associated with the first object. */
		extent_t *extent = item_extent[0];
		arena_t *locked_arena = extent_arena_get(extent);
		UNUSED bool idump;

		if (config_prof) {
			idump = false;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			assert(ptr != NULL);
			extent = item_extent[i];
			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
				    extent);
			}
		}
		if ((config_prof || config_stats) && locked_arena == arena) {
			if (config_prof) {
				idump = arena_prof_accum(tsd_tsdn(tsd), arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
#if defined(ANDROID_ENABLE_TCACHE_STATS)
			if (config_stats) {
				merged_stats = true;
				arena_stats_large_nrequests_add(tsd_tsdn(tsd),
				    &arena->stats, binind,
				    tbin->tstats.nrequests);
				tbin->tstats.nrequests = 0;
			}
#endif
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);

		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_finish(tsd_tsdn(tsd), extent);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		if (config_prof && idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
		    ndeferred);
		nflush = ndeferred;
	}
#if defined(ANDROID_ENABLE_TCACHE_STATS)
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
		    binind, tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}
#endif

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	assert(tcache->arena == NULL);
	tcache->arena = arena;

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);

		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		cache_bin_array_descriptor_init(
		    &tcache->cache_bin_array_descriptor, tcache->bins_small,
		    tcache->bins_large);
		ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);

		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
}

static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		ql_remove(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);
		tcache_stats_merge(tsdn, tcache, arena);
		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
	tcache->arena = NULL;
}

void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	tcache_arena_dissociate(tsdn, tcache);
	tcache_arena_associate(tsdn, tcache, arena);
}

bool
tsd_tcache_enabled_data_init(tsd_t *tsd) {
	/* Called upon tsd initialization. */
	tsd_tcache_enabled_set(tsd, opt_tcache);
	tsd_slow_update(tsd);

	if (opt_tcache) {
		/* Trigger tcache init. */
		tsd_tcache_data_init(tsd);
	}

	return false;
}

/* Initialize auto tcache (embedded in TSD). */
static void
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
	memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
	tcache->prof_accumbytes = 0;
	tcache->next_gc_bin = 0;
	tcache->arena = NULL;

	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

	size_t stack_offset = 0;
	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS);
	memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS));
	unsigned i = 0;
	for (; i < NBINS; i++) {
		tcache->lg_fill_div[i] = 1;
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		/*
		 * avail points past the available space.  Allocations will
		 * access the slots toward higher addresses (for the benefit of
		 * prefetch).
		 */
		tcache_small_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	for (; i < nhbins; i++) {
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		tcache_large_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	assert(stack_offset == stack_nelms * sizeof(void *));
}

/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
	assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
	size_t size = stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
	    NULL, true, arena_get(TSDN_NULL, 0, true));
	if (avail_array == NULL) {
		return true;
	}

	tcache_init(tsd, tcache, avail_array);
	/*
	 * Initialization is a bit tricky here.  After malloc init is done, all
	 * threads can rely on arena_choose and associate tcache accordingly.
	 * However, the thread that does actual malloc bootstrapping relies on
	 * functional tsd, and it can only rely on a0.  In that case, we
	 * associate its tcache to a0 temporarily, and later on
	 * arena_choose_hard() will re-associate properly.
	 */
	tcache->arena = NULL;
	arena_t *arena;
	if (!malloc_initialized()) {
		/* If in initialization, assign to a0. */
		arena = arena_get(tsd_tsdn(tsd), 0, false);
		tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
	} else {
		arena = arena_choose(tsd, NULL);
		/* This may happen if thread.tcache.enabled is used. */
		if (tcache->arena == NULL) {
			tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
		}
	}
	assert(arena == tcache->arena);

	return false;
}

/* Create a manual tcache for the tcache.create mallctl. */
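/*
 * The explicit tcache is backed by a single allocation: the tcache_t struct,
 * padded up to pointer alignment, immediately followed by the per-bin pointer
 * stacks (stack_nelms slots in total), with the whole size rounded up to a
 * cacheline multiple.
 */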
tcache_t *
tcache_create_explicit(tsd_t *tsd) {
	tcache_t *tcache;
	size_t size, stack_offset;

	size = sizeof(tcache_t);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
	    arena_get(TSDN_NULL, 0, true));
	if (tcache == NULL) {
		return NULL;
	}

	tcache_init(tsd, tcache,
	    (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
	tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));

	return tcache;
}

static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
	assert(tcache->arena != NULL);

	for (unsigned i = 0; i < NBINS; i++) {
		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

#if defined(ANDROID_ENABLE_TCACHE_STATS)
		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
#endif
	}
	for (unsigned i = NBINS; i < nhbins; i++) {
		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

#if defined(ANDROID_ENABLE_TCACHE_STATS)
		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
#endif
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
	    tcache->prof_accumbytes)) {
		prof_idump(tsd_tsdn(tsd));
	}
}

void
tcache_flush(tsd_t *tsd) {
	assert(tcache_available(tsd));
	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
}

static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
	tcache_flush_cache(tsd, tcache);
	tcache_arena_dissociate(tsd_tsdn(tsd), tcache);

	if (tsd_tcache) {
		/* Release the avail array for the TSD embedded auto tcache. */
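		/*
		 * tcache_init() pointed bin 0's avail one past its own slot
		 * range, i.e. ncached_max slots beyond the start of the
		 * allocation, so subtracting that distance recovers the base
		 * pointer originally returned by ipallocztm().
		 */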
		void *avail_array =
		    (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
		    (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
		idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
	} else {
		/* Release both the tcache struct and avail array. */
		idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
	}
}

/* For auto tcache (embedded in TSD) only. */
void
tcache_cleanup(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get(tsd);
	if (!tcache_available(tsd)) {
		assert(tsd_tcache_enabled_get(tsd) == false);
		if (config_debug) {
			assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
		}
		return;
	}
	assert(tsd_tcache_enabled_get(tsd));
	assert(tcache_small_bin_get(tcache, 0)->avail != NULL);

	tcache_destroy(tsd, tcache, true);
	if (config_debug) {
		tcache_small_bin_get(tcache, 0)->avail = NULL;
	}
}

void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
#if defined(ANDROID_ENABLE_TCACHE_STATS)
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		bin_t *bin = &arena->bins[i];
		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		malloc_mutex_lock(tsdn, &bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(tsdn, &bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
		    tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}
#endif
}

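/*
 * Manual tcaches created via the tcache.create mallctl live in the global
 * tcaches[] array.  Slots are handed out sequentially (tcaches_past) until
 * the array is exhausted, and destroyed slots are recycled through the
 * tcaches_avail free list; tcaches_mtx protects all of this state.
 */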
static bool
tcaches_create_prep(tsd_t *tsd) {
	bool err;

	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);

	if (tcaches == NULL) {
		tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
		    * (MALLOCX_TCACHE_MAX+1), CACHELINE);
		if (tcaches == NULL) {
			err = true;
			goto label_return;
		}
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
		err = true;
		goto label_return;
	}

	err = false;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	return err;
}

bool
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);

	bool err;

	if (tcaches_create_prep(tsd)) {
		err = true;
		goto label_return;
	}

	tcache_t *tcache = tcache_create_explicit(tsd);
	if (tcache == NULL) {
		err = true;
		goto label_return;
	}

	tcaches_t *elm;
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcaches_avail != NULL) {
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = (unsigned)(elm - tcaches);
	} else {
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);

	err = false;
label_return:
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
	return err;
}

static tcache_t *
tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);

	if (elm->tcache == NULL) {
		return NULL;
	}
	tcache_t *tcache = elm->tcache;
	elm->tcache = NULL;
	return tcache;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_destroy(tsd, tcache, false);
	}
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcaches_t *elm = &tcaches[ind];
	tcache_t *tcache = tcaches_elm_remove(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_destroy(tsd, tcache, false);
	}
}

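/*
 * Boot-time sizing: tcache_maxclass is opt_lg_tcache_max clamped to at least
 * SMALL_MAXCLASS, nhbins counts every size class the tcache will serve, and
 * each small bin caches roughly twice the number of regions in one slab,
 * clamped to [TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX]; large bins
 * cache TCACHE_NSLOTS_LARGE objects each.
 */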
bool
tcache_boot(tsdn_t *tsdn) {
	/* If necessary, clamp opt_lg_tcache_max. */
	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
	    SMALL_MAXCLASS) {
		tcache_maxclass = SMALL_MAXCLASS;
	} else {
		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
	}

	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	nhbins = sz_size2index(tcache_maxclass) + 1;

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
	    * sizeof(cache_bin_info_t), CACHELINE);
	if (tcache_bin_info == NULL) {
		return true;
	}
	stack_nelms = 0;
	unsigned i;
	for (i = 0; i < NBINS; i++) {
		if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((bin_infos[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (bin_infos[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return false;
}

void
tcache_prefork(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_prefork(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_parent(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_child(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
	}
}