#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

unsigned		nhbins;
size_t			tcache_maxclass;

tcaches_t		*tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned		tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t	*tcaches_avail;

/******************************************************************************/

size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

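/*
 * Incremental GC slow path for one cache bin: flush roughly 3/4 of the
 * objects sitting below the bin's low water mark, adapt the fill count via
 * lg_fill_div, and advance next_gc_bin in round-robin order.
 */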
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
	szind_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
}

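/*
 * Small allocation slow path: refill the cache bin from the arena, then retry
 * the easy allocation path from the refilled bin.
 */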
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
{
	void *ret;

	arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
	    tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin, tcache_success);

	return (ret);
}

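/*
 * Flush all but the last rem cached objects from a small bin, returning each
 * object to the arena bin it was allocated from.  Objects owned by an arena
 * other than the currently locked one are deferred to a later pass of the
 * outer loop.
 */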
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    *(tbin->avail - 1));
		arena_t *bin_arena = extent_node_arena_get(&chunk->node);
		arena_bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(arena, tcache->prof_accumbytes))
				prof_idump();
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && bin_arena == arena) {
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = *(tbin->avail - 1 - i);
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (extent_node_arena_get(&chunk->node) == bin_arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_bits_t *bitselm =
				    arena_bitselm_get(chunk, pageind);
				arena_dalloc_bin_junked_locked(bin_arena, chunk,
				    ptr, bitselm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
		arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

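/*
 * Large-object analogue of tcache_bin_flush_small(): return all but the last
 * rem cached objects to their owning arenas, locking one arena per pass and
 * deferring objects that belong to other arenas.
 */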
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    *(tbin->avail - 1));
		arena_t *locked_arena = extent_node_arena_get(&chunk->node);
		UNUSED bool idump;

		if (config_prof)
			idump = false;
		malloc_mutex_lock(&locked_arena->lock);
		if ((config_prof || config_stats) && locked_arena == arena) {
			if (config_prof) {
				idump = arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = *(tbin->avail - 1 - i);
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (extent_node_arena_get(&chunk->node) ==
			    locked_arena) {
				arena_dalloc_large_junked_locked(locked_arena,
				    chunk, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&locked_arena->lock);
		if (config_prof && idump)
			prof_idump();
		arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

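/*
 * Arena association management.  The tcache_ql list only exists for stats
 * reporting, so association and dissociation are no-ops unless config_stats.
 */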
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
}

void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{

	tcache_arena_dissociate(tcache, oldarena);
	tcache_arena_associate(tcache, newarena);
}

void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		tcache_stats_merge(tcache, arena);
		malloc_mutex_unlock(&arena->lock);
	}
}

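/*
 * Create a tcache for the calling thread, or return NULL if tcaches are
 * disabled or no arena can be chosen.
 */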
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
	arena_t *arena;

	if (!tcache_enabled_get()) {
		if (tsd_nominal(tsd))
			tcache_enabled_set(false); /* Memoize. */
		return (NULL);
	}
	arena = arena_choose(tsd, NULL);
	if (unlikely(arena == NULL))
		return (NULL);
	return (tcache_create(tsd, arena));
}

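/*
 * Allocate and initialize a tcache: the tcache_t header, one tcache_bin_t per
 * cached size class, and the per-bin pointer stacks, all carved out of a
 * single cacheline-aligned allocation.
 */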
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sa2u(size, CACHELINE);

	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
	    arena_get(0, false));
	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		/*
		 * avail points past the available space.  Allocations will
		 * access the slots toward higher addresses (for the benefit of
		 * prefetch).
		 */
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
	}

	return (tcache);
}

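/*
 * Flush every cache bin, merge any remaining per-thread stats into the arena,
 * flush accumulated profiling bytes, and free the tcache itself.
 */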
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
	arena_t *arena;
	unsigned i;

	arena = arena_choose(tsd, NULL);
	tcache_arena_dissociate(tcache, arena);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(arena, tcache->prof_accumbytes))
		prof_idump();

	idalloctm(tsd, tcache, false, true, true);
}

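/* Destroy the calling thread's tcache, if it has one. */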
void
tcache_cleanup(tsd_t *tsd)
{
	tcache_t *tcache;

	if (!config_tcache)
		return;

	if ((tcache = tsd_tcache_get(tsd)) != NULL) {
		tcache_destroy(tsd, tcache);
		tsd_tcache_set(tsd, NULL);
	}
}

void
tcache_enabled_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

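/*
 * Management of the global tcaches[] array: each created entry maps a small
 * integer index back to an explicitly created tcache, and destroyed entries
 * are recycled through the tcaches_avail free list.
 */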
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
	tcache_t *tcache;
	tcaches_t *elm;

	if (tcaches == NULL) {
		tcaches = base_alloc(sizeof(tcache_t *) *
		    (MALLOCX_TCACHE_MAX+1));
		if (tcaches == NULL)
			return (true);
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
		return (true);
	tcache = tcache_create(tsd, arena_get(0, false));
	if (tcache == NULL)
		return (true);

	if (tcaches_avail != NULL) {
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = (unsigned)(elm - tcaches);
	} else {
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}

	return (false);
}

static void
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
{

	if (elm->tcache == NULL)
		return;
	tcache_destroy(tsd, elm->tcache);
	elm->tcache = NULL;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind)
{

	tcaches_elm_flush(tsd, &tcaches[ind]);
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	tcaches_elm_flush(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
}

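/*
 * One-time initialization: clamp tcache_maxclass based on opt_lg_tcache_max,
 * then size tcache_bin_info[] and stack_nelms for all cached size classes.
 * Returns true on allocation failure.
 */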
bool
tcache_boot(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > large_maxclass)
		tcache_maxclass = large_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = size2index(tcache_maxclass) + 1;

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((arena_bin_info[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}