/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * tcache pointers close to NULL encode state information used for two
 * purposes: disabling thread caching on a per-thread basis, and cleaning up
 * during thread shutdown.
 */
#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
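
/*
 * Illustrative sketch (not part of the header proper): because every sentinel
 * value is <= TCACHE_STATE_MAX, a single unsigned comparison distinguishes a
 * fully initialized tcache from all of the sentinel states:
 *
 *	tcache_t *tcache = *tcache_tsd_get();
 *	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
 *		... NULL or one of the sentinels; do not dereference ...
 *	} else {
 *		... fully initialized tcache ...
 *	}
 *
 * tcache_get() and tcache_flush() below rely on this idiom.
 */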

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache. This is an additional constraint beyond the per-size-class limit of
 * twice the number of regions per run.
 *
 * This constant must be an even number.
 */
#define	TCACHE_NSLOTS_SMALL_MAX		200

/* Number of cache slots for large size classes. */
#define	TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define	LG_TCACHE_MAXCLASS_DEFAULT	15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps. Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define	TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define	TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
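
/*
 * Worked example (illustrative; NBINS is configuration-dependent, 28 is an
 * assumed value): TCACHE_GC_INCR == 8192/28 + 1 == 293, so sweeping all 28
 * bins takes 293 * 28 == 8204 events, slightly more than TCACHE_GC_SWEEP, as
 * the rounding note above describes.
 */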

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
	tcache_enabled_true    = 1,
	tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};
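
/*
 * Usage note (sketch): tcache_bin_info is a single global array indexed by
 * binind, so the limits are shared by all threads rather than duplicated in
 * every tcache, e.g.:
 *
 *	if (tbin->ncached == tcache_bin_info[binind].ncached_max)
 *		... flush the bin ...
 */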

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array. During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
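
/*
 * Layout sketch (illustrative; the authoritative computation lives in
 * tcache_create()). The single allocation backing a tcache is sized roughly
 * as:
 *
 *	unsigned i;
 *	size_t stack_nelms = 0;
 *	for (i = 0; i < nhbins; i++)
 *		stack_nelms += tcache_bin_info[i].ncached_max;
 *	size = offsetof(tcache_t, tbins)
 *	    + nhbins * sizeof(tcache_bin_t)	(tbins[0..nhbins))
 *	    + stack_nelms * sizeof(void *);	(contiguous avail stacks)
 *
 * after which each tbins[i].avail is pointed at its slice of the trailing
 * pointer array.
 */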

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/*
 * Number of tcache bins. There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t	nhbins;

/* Maximum cached size class. */
extern size_t	tcache_maxclass;

size_t	tcache_salloc(const void *ptr);
void	tcache_event_hard(tcache_t *tcache);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void	tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_get_hard(tcache_t *tcache, bool create);
tcache_t *tcache_create(arena_t *arena);
void	tcache_destroy(tcache_t *tcache);
void	tcache_thread_cleanup(void *arg);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot0(void);
bool	tcache_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void	tcache_event(tcache_t *tcache);
void	tcache_flush(void);
bool	tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void	tcache_enabled_set(bool enabled);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
tcache_flush(void)
{
	tcache_t *tcache;

	cassert(config_tcache);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
		return;
	tcache_destroy(tcache);
	tcache = NULL;
	tcache_tsd_set(&tcache);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tcache_enabled = *tcache_enabled_tsd_get();
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tcache_enabled_tsd_set(&tcache_enabled);
	}

	return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tcache_enabled_t tcache_enabled;
	tcache_t *tcache;

	cassert(config_tcache);

	tcache_enabled = (tcache_enabled_t)enabled;
	tcache_enabled_tsd_set(&tcache_enabled);
	tcache = *tcache_tsd_get();
	if (enabled) {
		if (tcache == TCACHE_STATE_DISABLED) {
			tcache = NULL;
			tcache_tsd_set(&tcache);
		}
	} else /* disabled */ {
		if ((uintptr_t)tcache > (uintptr_t)TCACHE_STATE_MAX) {
			tcache_destroy(tcache);
			tcache = NULL;
		}
		if (tcache == NULL) {
			tcache = TCACHE_STATE_DISABLED;
			tcache_tsd_set(&tcache);
		}
	}
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
	tcache_t *tcache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && isthreaded == false)
		return (NULL);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
		if (tcache == TCACHE_STATE_DISABLED)
			return (NULL);
		tcache = tcache_get_hard(tcache, create);
	}

	return (tcache);
}
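
/*
 * Typical call pattern (sketch; the arena fallback shown is a simplification
 * of the small-allocation fast path): callers try the thread cache first and
 * fall back to the arena when it is unavailable:
 *
 *	tcache_t *tcache = tcache_get(true);
 *	if (tcache != NULL)
 *		ret = tcache_alloc_small(tcache, size, zero);
 *	else
 *		ret = arena_malloc_small(choose_arena(arena), size, zero);
 */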

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR)
		tcache_event_hard(tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}
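
/*
 * Descriptive note (sketch): avail is used as a LIFO stack, so the most
 * recently cached object is reused first. low_water tracks the minimum fill
 * level between incremental GC passes; tcache_event_hard() uses it to decide
 * how aggressively to drain bins that stay over-full, and the -1 sentinel
 * above marks a bin that ran completely empty.
 */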

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = small_size2bin(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	size = small_bin2size(binind);
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(tcache_salloc(ret) == size);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (opt_zero)
				memset(ret, 0, size);
		}
	} else {
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, size);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += size;
	tcache_event(tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
	binind = NBINS + (size >> LG_PAGE) - 1;
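	/*
	 * Worked example of this mapping (assuming LG_PAGE == 12, i.e. 4 KiB
	 * pages): size 4 KiB -> binind NBINS, 8 KiB -> NBINS + 1, and so on
	 * up to tcache_maxclass. tcache_dalloc_large() below computes the
	 * same index.
	 */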
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof && size == PAGE) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
		} else
			memset(ret, 0, size);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
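
/*
 * Flush policy note (sketch): when a bin is full, half of its capacity is
 * flushed back to the arena before the new pointer is cached; the rem
 * argument to tcache_bin_flush_small()/_large() is the number of objects to
 * retain. E.g., with ncached_max == 200, a full bin is drained down to 100
 * cached objects, leaving room for a run of frees without flushing on every
 * call. tcache_dalloc_large() below applies the same policy.
 */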

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	binind = NBINS + (size >> LG_PAGE) - 1;

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/