#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elements per tcache. */

size_t nhbins;
size_t tcache_maxclass;

/******************************************************************************/

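/* Return the usable size of the allocation pointed to by ptr. */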
size_t
tcache_salloc(const void *ptr)
{

        return (arena_salloc(ptr, false));
}

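/*
 * Incremental GC for one cache bin: flush a portion of the objects that have
 * sat unused below the low water mark since the last pass, and adapt the
 * bin's fill count to recent demand.  Bins are processed round-robin, one per
 * call.
 */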
void
tcache_event_hard(tcache_t *tcache)
{
        size_t binind = tcache->next_gc_bin;
        tcache_bin_t *tbin = &tcache->tbins[binind];
        tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

        if (tbin->low_water > 0) {
                /*
                 * Flush (ceiling) 3/4 of the objects below the low water mark.
                 */
                if (binind < NBINS) {
                        tcache_bin_flush_small(tbin, binind, tbin->ncached -
                            tbin->low_water + (tbin->low_water >> 2), tcache);
                } else {
                        tcache_bin_flush_large(tbin, binind, tbin->ncached -
                            tbin->low_water + (tbin->low_water >> 2), tcache);
                }
                /*
                 * Reduce fill count by 2X.  Limit lg_fill_div such that the
                 * fill count is always at least 1.
                 */
                if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
                        tbin->lg_fill_div++;
        } else if (tbin->low_water < 0) {
                /*
                 * Increase fill count by 2X.  Make sure lg_fill_div stays
                 * greater than 0.
                 */
                if (tbin->lg_fill_div > 1)
                        tbin->lg_fill_div--;
        }
        tbin->low_water = tbin->ncached;

        tcache->next_gc_bin++;
        if (tcache->next_gc_bin == nhbins)
                tcache->next_gc_bin = 0;
        tcache->ev_cnt = 0;
}

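/*
 * Slow path for small allocation: the bin's stack is empty, so refill it from
 * the arena and retry the stack-pop fast path.
 */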
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
        void *ret;

        arena_tcache_fill_small(tcache->arena, tbin, binind,
            config_prof ? tcache->prof_accumbytes : 0);
        if (config_prof)
                tcache->prof_accumbytes = 0;
        ret = tcache_alloc_easy(tbin);

        return (ret);
}

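/*
 * Flush cached small objects back to their arena bins until only rem objects
 * remain cached.  Each pass of the outer loop locks the bin that owns the
 * first unflushed object, frees every object belonging to that bin's arena,
 * and defers objects owned by other arenas to a later pass.
 */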
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
        void *ptr;
        unsigned i, nflush, ndeferred;
        bool merged_stats = false;

        assert(binind < NBINS);
        assert(rem <= tbin->ncached);

        for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
                /* Lock the arena bin associated with the first object. */
                arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
                    tbin->avail[0]);
                arena_t *arena = chunk->arena;
                arena_bin_t *bin = &arena->bins[binind];

                if (config_prof && arena == tcache->arena) {
                        if (arena_prof_accum(arena, tcache->prof_accumbytes))
                                prof_idump();
                        tcache->prof_accumbytes = 0;
                }

                malloc_mutex_lock(&bin->lock);
                if (config_stats && arena == tcache->arena) {
                        assert(merged_stats == false);
                        merged_stats = true;
                        bin->stats.nflushes++;
                        bin->stats.nrequests += tbin->tstats.nrequests;
                        tbin->tstats.nrequests = 0;
                }
                ndeferred = 0;
                for (i = 0; i < nflush; i++) {
                        ptr = tbin->avail[i];
                        assert(ptr != NULL);
                        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
                        if (chunk->arena == arena) {
                                size_t pageind = ((uintptr_t)ptr -
                                    (uintptr_t)chunk) >> LG_PAGE;
                                arena_chunk_map_t *mapelm =
                                    arena_mapp_get(chunk, pageind);
                                if (config_fill && opt_junk) {
                                        arena_alloc_junk_small(ptr,
                                            &arena_bin_info[binind], true);
                                }
                                arena_dalloc_bin_locked(arena, chunk, ptr,
                                    mapelm);
                        } else {
                                /*
                                 * This object was allocated via a different
                                 * arena bin than the one that is currently
                                 * locked.  Stash the object, so that it can be
                                 * handled in a future pass.
                                 */
                                tbin->avail[ndeferred] = ptr;
                                ndeferred++;
                        }
                }
                malloc_mutex_unlock(&bin->lock);
        }
        if (config_stats && merged_stats == false) {
                /*
                 * The flush loop didn't happen to flush to this thread's
                 * arena, so the stats didn't get merged.  Manually do so now.
                 */
                arena_bin_t *bin = &tcache->arena->bins[binind];
                malloc_mutex_lock(&bin->lock);
                bin->stats.nflushes++;
                bin->stats.nrequests += tbin->tstats.nrequests;
                tbin->tstats.nrequests = 0;
                malloc_mutex_unlock(&bin->lock);
        }

        memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
            rem * sizeof(void *));
        tbin->ncached = rem;
        if ((int)tbin->ncached < tbin->low_water)
                tbin->low_water = tbin->ncached;
}

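/*
 * Flush cached large objects back to their arenas until only rem objects
 * remain cached.  Same deferred multi-pass structure as
 * tcache_bin_flush_small(), except that the arena lock (rather than a bin
 * lock) is held while freeing.
 */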
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
        void *ptr;
        unsigned i, nflush, ndeferred;
        bool merged_stats = false;

        assert(binind < nhbins);
        assert(rem <= tbin->ncached);

        for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
                /* Lock the arena associated with the first object. */
                arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
                    tbin->avail[0]);
                arena_t *arena = chunk->arena;
                UNUSED bool idump;

                if (config_prof)
                        idump = false;
                malloc_mutex_lock(&arena->lock);
                if ((config_prof || config_stats) && arena == tcache->arena) {
                        if (config_prof) {
                                idump = arena_prof_accum_locked(arena,
                                    tcache->prof_accumbytes);
                                tcache->prof_accumbytes = 0;
                        }
                        if (config_stats) {
                                merged_stats = true;
                                arena->stats.nrequests_large +=
                                    tbin->tstats.nrequests;
                                arena->stats.lstats[binind - NBINS].nrequests +=
                                    tbin->tstats.nrequests;
                                tbin->tstats.nrequests = 0;
                        }
                }
                ndeferred = 0;
                for (i = 0; i < nflush; i++) {
                        ptr = tbin->avail[i];
                        assert(ptr != NULL);
                        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
                        if (chunk->arena == arena)
                                arena_dalloc_large_locked(arena, chunk, ptr);
                        else {
                                /*
                                 * This object was allocated via a different
                                 * arena than the one that is currently locked.
                                 * Stash the object, so that it can be handled
                                 * in a future pass.
                                 */
                                tbin->avail[ndeferred] = ptr;
                                ndeferred++;
                        }
                }
                malloc_mutex_unlock(&arena->lock);
                if (config_prof && idump)
                        prof_idump();
        }
        if (config_stats && merged_stats == false) {
                /*
                 * The flush loop didn't happen to flush to this thread's
                 * arena, so the stats didn't get merged.  Manually do so now.
                 */
                arena_t *arena = tcache->arena;
                malloc_mutex_lock(&arena->lock);
                arena->stats.nrequests_large += tbin->tstats.nrequests;
                arena->stats.lstats[binind - NBINS].nrequests +=
                    tbin->tstats.nrequests;
                tbin->tstats.nrequests = 0;
                malloc_mutex_unlock(&arena->lock);
        }

        memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
            rem * sizeof(void *));
        tbin->ncached = rem;
        if ((int)tbin->ncached < tbin->low_water)
                tbin->low_water = tbin->ncached;
}

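/*
 * Bind tcache to arena.  For stats builds, also link the tcache into the
 * arena's list of extant tcaches so that its stats can be merged.
 */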
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

        if (config_stats) {
                /* Link into list of extant tcaches. */
                malloc_mutex_lock(&arena->lock);
                ql_elm_new(tcache, link);
                ql_tail_insert(&arena->tcache_ql, tcache, link);
                malloc_mutex_unlock(&arena->lock);
        }
        tcache->arena = arena;
}

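/*
 * For stats builds, unlink tcache from its arena's tcache list and merge its
 * remaining stats into the arena.
 */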
void
tcache_arena_dissociate(tcache_t *tcache)
{

        if (config_stats) {
                /* Unlink from list of extant tcaches. */
                malloc_mutex_lock(&tcache->arena->lock);
                ql_remove(&tcache->arena->tcache_ql, tcache, link);
                tcache_stats_merge(tcache, tcache->arena);
                malloc_mutex_unlock(&tcache->arena->lock);
        }
}

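/*
 * Slow path for tcache_get(): tcache is either missing or holds one of the
 * cleanup sentinel values, so decide whether to create a tcache, record a
 * reincarnation, or report that no tcache is available.
 */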
tcache_t *
tcache_get_hard(tcache_t *tcache, bool create)
{

        if (tcache == NULL) {
                if (create == false) {
                        /*
                         * Creating a tcache here would cause
                         * allocation as a side effect of free().
                         * Ordinarily that would be okay since
                         * tcache_create() failure is a soft failure
                         * that doesn't propagate.  However, if TLS
                         * data are freed via free() as in glibc,
                         * subtle corruption could result from setting
                         * a TLS variable after its backing memory is
                         * freed.
                         */
                        return (NULL);
                }
                if (tcache_enabled_get() == false) {
                        tcache_enabled_set(false); /* Memoize. */
                        return (NULL);
                }
                return (tcache_create(choose_arena(NULL)));
        }
        if (tcache == TCACHE_STATE_PURGATORY) {
                /*
                 * Make a note that an allocator function was called
                 * after tcache_thread_cleanup() was called.
                 */
                tcache = TCACHE_STATE_REINCARNATED;
                tcache_tsd_set(&tcache);
                return (NULL);
        }
        if (tcache == TCACHE_STATE_REINCARNATED)
                return (NULL);
        not_reached();
        return (NULL);
}

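/*
 * Allocate and initialize a tcache for the calling thread.  The per-bin
 * pointer stacks are carved out of a single allocation, immediately following
 * the tcache_t header.
 */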
tcache_t *
tcache_create(arena_t *arena)
{
        tcache_t *tcache;
        size_t size, stack_offset;
        unsigned i;

        size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
        /* Naturally align the pointer stacks. */
        size = PTR_CEILING(size);
        stack_offset = size;
        size += stack_nelms * sizeof(void *);
        /*
         * Round up to the nearest multiple of the cacheline size, in order to
         * avoid the possibility of false cacheline sharing.
         *
         * That this works relies on the same logic as in ipalloc(), but we
         * cannot directly call ipalloc() here due to tcache bootstrapping
         * issues.
         */
        size = (size + CACHELINE_MASK) & (-CACHELINE);

        if (size <= SMALL_MAXCLASS)
                tcache = (tcache_t *)arena_malloc_small(arena, size, true);
        else if (size <= tcache_maxclass)
                tcache = (tcache_t *)arena_malloc_large(arena, size, true);
        else
                tcache = (tcache_t *)icalloct(size, false, arena);

        if (tcache == NULL)
                return (NULL);

        tcache_arena_associate(tcache, arena);

        assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
        for (i = 0; i < nhbins; i++) {
                tcache->tbins[i].lg_fill_div = 1;
                tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
                    (uintptr_t)stack_offset);
                stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
        }

        tcache_tsd_set(&tcache);

        return (tcache);
}

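/*
 * Flush all cached objects, merge any residual stats and profiling counts
 * into the arena, and deallocate the tcache itself.
 */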
void
tcache_destroy(tcache_t *tcache)
{
        unsigned i;
        size_t tcache_size;

        tcache_arena_dissociate(tcache);

        for (i = 0; i < NBINS; i++) {
                tcache_bin_t *tbin = &tcache->tbins[i];
                tcache_bin_flush_small(tbin, i, 0, tcache);

                if (config_stats && tbin->tstats.nrequests != 0) {
                        arena_t *arena = tcache->arena;
                        arena_bin_t *bin = &arena->bins[i];
                        malloc_mutex_lock(&bin->lock);
                        bin->stats.nrequests += tbin->tstats.nrequests;
                        malloc_mutex_unlock(&bin->lock);
                }
        }

        for (; i < nhbins; i++) {
                tcache_bin_t *tbin = &tcache->tbins[i];
                tcache_bin_flush_large(tbin, i, 0, tcache);

                if (config_stats && tbin->tstats.nrequests != 0) {
                        arena_t *arena = tcache->arena;
                        malloc_mutex_lock(&arena->lock);
                        arena->stats.nrequests_large += tbin->tstats.nrequests;
                        arena->stats.lstats[i - NBINS].nrequests +=
                            tbin->tstats.nrequests;
                        malloc_mutex_unlock(&arena->lock);
                }
        }

        if (config_prof && tcache->prof_accumbytes > 0 &&
            arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
                prof_idump();

        tcache_size = arena_salloc(tcache, false);
        if (tcache_size <= SMALL_MAXCLASS) {
                arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
                arena_t *arena = chunk->arena;
                size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
                    LG_PAGE;
                arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

                arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
        } else if (tcache_size <= tcache_maxclass) {
                arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
                arena_t *arena = chunk->arena;

                arena_dalloc_large(arena, chunk, tcache);
        } else
                idalloct(tcache, false);
}

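/*
 * TSD destructor.  The PURGATORY/REINCARNATED sentinel values deal with other
 * TSD destructors that call into the allocator after this one has already
 * destroyed the tcache.
 */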
void
tcache_thread_cleanup(void *arg)
{
        tcache_t *tcache = *(tcache_t **)arg;

        if (tcache == TCACHE_STATE_DISABLED) {
                /* Do nothing. */
        } else if (tcache == TCACHE_STATE_REINCARNATED) {
                /*
                 * Another destructor called an allocator function after this
                 * destructor was called.  Reset tcache to
                 * TCACHE_STATE_PURGATORY in order to receive another callback.
                 */
                tcache = TCACHE_STATE_PURGATORY;
                tcache_tsd_set(&tcache);
        } else if (tcache == TCACHE_STATE_PURGATORY) {
                /*
                 * The previous time this destructor was called, we set the key
                 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
                 * cause re-creation of the tcache.  This time, do nothing, so
                 * that the destructor will not be called again.
                 */
        } else if (tcache != NULL) {
                assert(tcache != TCACHE_STATE_PURGATORY);
                tcache_destroy(tcache);
                tcache = TCACHE_STATE_PURGATORY;
                tcache_tsd_set(&tcache);
        }
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
        unsigned i;

        cassert(config_stats);

        /* Merge and reset tcache stats. */
        for (i = 0; i < NBINS; i++) {
                arena_bin_t *bin = &arena->bins[i];
                tcache_bin_t *tbin = &tcache->tbins[i];
                malloc_mutex_lock(&bin->lock);
                bin->stats.nrequests += tbin->tstats.nrequests;
                malloc_mutex_unlock(&bin->lock);
                tbin->tstats.nrequests = 0;
        }

        for (; i < nhbins; i++) {
                malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
                tcache_bin_t *tbin = &tcache->tbins[i];
                arena->stats.nrequests_large += tbin->tstats.nrequests;
                lstats->nrequests += tbin->tstats.nrequests;
                tbin->tstats.nrequests = 0;
        }
}

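/*
 * First-phase bootstrap: compute tcache_maxclass and nhbins, and initialize
 * tcache_bin_info with per-bin cache capacities.
 */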
bool
tcache_boot0(void)
{
        unsigned i;

        /*
         * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
         * known.
         */
        if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
                tcache_maxclass = SMALL_MAXCLASS;
        else if ((1U << opt_lg_tcache_max) > arena_maxclass)
                tcache_maxclass = arena_maxclass;
        else
                tcache_maxclass = (1U << opt_lg_tcache_max);

        nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

        /* Initialize tcache_bin_info. */
        tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
            sizeof(tcache_bin_info_t));
        if (tcache_bin_info == NULL)
                return (true);
        stack_nelms = 0;
        for (i = 0; i < NBINS; i++) {
                if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
                        tcache_bin_info[i].ncached_max =
                            (arena_bin_info[i].nregs << 1);
                } else {
                        tcache_bin_info[i].ncached_max =
                            TCACHE_NSLOTS_SMALL_MAX;
                }
                stack_nelms += tcache_bin_info[i].ncached_max;
        }
        for (; i < nhbins; i++) {
                tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
                stack_nelms += tcache_bin_info[i].ncached_max;
        }

        return (false);
}

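/* Second-phase bootstrap: initialize the tcache-related TSD keys. */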
bool
tcache_boot1(void)
{

        if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
                return (true);

        return (false);
}