#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H

#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/witness.h"

/*
 * Thread-Specific-Data layout
 * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
 * s: state
 * e: tcache_enabled
 * m: thread_allocated (config_stats)
 * f: thread_deallocated (config_stats)
 * p: prof_tdata (config_prof)
 * c: rtree_ctx (rtree cache accessed on deallocation)
 * t: tcache
 * --- data not accessed on tcache fast path: arena-related fields ---
 * d: arenas_tdata_bypass
 * r: reentrancy_level
 * x: narenas_tdata
 * i: iarena
 * a: arena
 * o: arenas_tdata
 * Loading TSD data is on the critical path of basically all malloc operations.
 * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
 * Use a compact layout to reduce cache footprint.
 * +--- 64-bit and 64B cacheline; 1B each letter; first byte on the left. ---+
 * |---------------------------- 1st cacheline ----------------------------|
 * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
 * |---------------------------- 2nd cacheline ----------------------------|
 * | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
 * |---------------------------- 3rd cacheline ----------------------------|
 * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
 * +-------------------------------------------------------------------------+
 * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
 *
 * The last three members (i, a, and o) before tcache aren't really needed on
 * the tcache fast path.  However, we have a number of unused tcache bins and
 * witnesses (never touched unless config_debug) at the end of tcache, so we
 * place them there to avoid breaking the cachelines and possibly paging in an
 * extra page.
 */
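
/*
 * Illustrative sketch only (not part of this header): given the layout above,
 * the state byte is expected to sit at offset zero, which a build could
 * spot-check with a C11 static assertion, e.g.
 *
 *     #include <stddef.h>
 *     static_assert(offsetof(tsd_t, state) == 0,
 *         "tsd state must be the first byte");
 */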
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
#  define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
#  define MALLOC_TEST_TSD \
    O(test_data, int, int) \
    O(test_callback, test_callback_t, int)
#  define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
#  define MALLOC_TEST_TSD
#  define MALLOC_TEST_TSD_INITIALIZER
#endif

/* O(name, type, nullable type) */
#define MALLOC_TSD \
    O(tcache_enabled, bool, bool) \
    O(arenas_tdata_bypass, bool, bool) \
    O(reentrancy_level, int8_t, int8_t) \
    O(narenas_tdata, uint32_t, uint32_t) \
    O(offset_state, uint64_t, uint64_t) \
    O(thread_allocated, uint64_t, uint64_t) \
    O(thread_deallocated, uint64_t, uint64_t) \
    O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
    O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
    O(iarena, arena_t *, arena_t *) \
    O(arena, arena_t *, arena_t *) \
    O(arenas_tdata, arena_tdata_t *, arena_tdata_t *) \
    O(tcache, tcache_t, tcache_t) \
    O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
    MALLOC_TEST_TSD

#define TSD_INITIALIZER { \
    tsd_state_uninitialized, \
    TCACHE_ENABLED_ZERO_INITIALIZER, \
    false, \
    0, \
    0, \
    0, \
    0, \
    0, \
    NULL, \
    RTREE_CTX_ZERO_INITIALIZER, \
    NULL, \
    NULL, \
    NULL, \
    TCACHE_ZERO_INITIALIZER, \
    WITNESS_TSD_INITIALIZER \
    MALLOC_TEST_TSD_INITIALIZER \
}

enum {
    tsd_state_nominal = 0, /* Common case --> jnz. */
    tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
    /* The two nominal states above must have the lowest values. */
    tsd_state_nominal_max = 1, /* Used for comparison only. */
    tsd_state_minimal_initialized = 2,
    tsd_state_purgatory = 3,
    tsd_state_reincarnated = 4,
    tsd_state_uninitialized = 5
};
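
/*
 * Since tsd_state_nominal is 0, the common-case check in tsd_fetch_impl()
 * below (tsd->state != tsd_state_nominal) is a comparison against zero,
 * which typically compiles down to a single test + jnz.
 */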

/* Manually limit tsd_state_t to a single byte. */
typedef uint8_t tsd_state_t;

/* The actual tsd. */
struct tsd_s {
    /*
     * The contents should be treated as totally opaque outside the tsd
     * module.  Access any thread-local state through the getters and
     * setters below.
     */
    tsd_state_t state;
#define O(n, t, nt) \
    t use_a_getter_or_setter_instead_##n;
MALLOC_TSD
#undef O
};
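
/*
 * For example, O(thread_allocated, uint64_t, uint64_t) expands to the member
 *
 *     uint64_t use_a_getter_or_setter_instead_thread_allocated;
 *
 * The deliberately unwieldy name discourages direct field access.
 */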

/*
 * Wrapper around tsd_t that makes it possible to avoid implicit conversion
 * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
 * explicitly converted to tsd_t, which is non-nullable.
 */
struct tsdn_s {
    tsd_t tsd;
};
#define TSDN_NULL ((tsdn_t *)0)
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
    return (tsdn_t *)tsd;
}

JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
    return tsdn == NULL;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
    assert(!tsdn_null(tsdn));

    return &tsdn->tsd;
}
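
/*
 * Illustrative usage sketch (foo is hypothetical): code that receives a
 * nullable tsdn_t * must check before converting back to tsd_t *:
 *
 *     static void
 *     foo(tsdn_t *tsdn) {
 *         if (tsdn_null(tsdn)) {
 *             // No thread-local state available; take a fallback path.
 *             return;
 *         }
 *         tsd_t *tsd = tsdn_tsd(tsdn);
 *         // ... operate on the non-nullable tsd ...
 *     }
 */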

void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg);
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
void tsd_slow_update(tsd_t *tsd);

/*
 * We put the platform-specific data declarations and inlines into their own
 * header files to avoid cluttering this file.  They define tsd_boot0,
 * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and
 * tsd_set.
 */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
#elif (defined(JEMALLOC_TLS))
#include "jemalloc/internal/tsd_tls.h"
#elif (defined(_WIN32))
#include "jemalloc/internal/tsd_win.h"
#else
#include "jemalloc/internal/tsd_generic.h"
#endif

/*
 * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
 * foo.  This omits some safety checks, and so can be used during tsd
 * initialization and cleanup.
 */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
    return &tsd->use_a_getter_or_setter_instead_##n; \
}
MALLOC_TSD
#undef O
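
/*
 * For example, the expansion for rtree_ctx is:
 *
 *     JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
 *     tsd_rtree_ctxp_get_unsafe(tsd_t *tsd) {
 *         return &tsd->use_a_getter_or_setter_instead_rtree_ctx;
 *     }
 */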

/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \
    assert(tsd->state == tsd_state_nominal || \
        tsd->state == tsd_state_nominal_slow || \
        tsd->state == tsd_state_reincarnated || \
        tsd->state == tsd_state_minimal_initialized); \
    return tsd_##n##p_get_unsafe(tsd); \
}
MALLOC_TSD
#undef O

/*
 * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
 * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
 */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE nt * \
tsdn_##n##p_get(tsdn_t *tsdn) { \
    if (tsdn_null(tsdn)) { \
        return NULL; \
    } \
    tsd_t *tsd = tsdn_tsd(tsdn); \
    return (nt *)tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
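
/*
 * The distinct nullable type matters only for witness_tsd in MALLOC_TSD
 * above: tsdn_witness_tsdp_get() returns witness_tsdn_t * rather than
 * witness_tsd_t *, mirroring the tsd_t/tsdn_t split.
 */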

/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
    return *tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O

/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t val) { \
    assert(tsd->state != tsd_state_reincarnated && \
        tsd->state != tsd_state_minimal_initialized); \
    *tsd_##n##p_get(tsd) = val; \
}
MALLOC_TSD
#undef O
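
/*
 * Illustrative usage sketch (the increment `usize` is hypothetical): a stats
 * update on the allocation path goes through these accessors rather than
 * touching the struct directly:
 *
 *     uint64_t allocated = tsd_thread_allocated_get(tsd);
 *     tsd_thread_allocated_set(tsd, allocated + usize);
 */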

JEMALLOC_ALWAYS_INLINE void
tsd_assert_fast(tsd_t *tsd) {
    assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
        tsd_reentrancy_level_get(tsd) == 0);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_fast(tsd_t *tsd) {
    bool fast = (tsd->state == tsd_state_nominal);
    if (fast) {
        tsd_assert_fast(tsd);
    }

    return fast;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init, bool minimal) {
    tsd_t *tsd = tsd_get(init);

    if (!init && tsd_get_allocates() && tsd == NULL) {
        return NULL;
    }
    assert(tsd != NULL);

    if (unlikely(tsd->state != tsd_state_nominal)) {
        return tsd_fetch_slow(tsd, minimal);
    }
    assert(tsd_fast(tsd));
    tsd_assert_fast(tsd);

    return tsd;
}

/* Get a minimal TSD that requires no cleanup.  See comments in free(). */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_min(void) {
    return tsd_fetch_impl(true, true);
}

/* For use by internal background threads only. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_internal_fetch(void) {
    tsd_t *tsd = tsd_fetch_min();
    /* Use reincarnated state to prevent full initialization. */
    tsd->state = tsd_state_reincarnated;

    return tsd;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) {
    return tsd_fetch_impl(true, false);
}

static inline bool
tsd_nominal(tsd_t *tsd) {
    return (tsd->state <= tsd_state_nominal_max);
}

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void) {
    if (!tsd_booted_get()) {
        return NULL;
    }

    return tsd_tsdn(tsd_fetch_impl(false, false));
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
    return tsd_rtree_ctxp_get(tsd);
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
    /*
     * If tsd cannot be accessed, initialize the fallback rtree_ctx and
     * return a pointer to it.
     */
    if (unlikely(tsdn_null(tsdn))) {
        rtree_ctx_data_init(fallback);
        return fallback;
    }
    return tsd_rtree_ctx(tsdn_tsd(tsdn));
}
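
/*
 * Illustrative caller sketch (hypothetical): a caller that may run without
 * TSD reserves a stack-allocated fallback:
 *
 *     rtree_ctx_t fallback;
 *     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &fallback);
 */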

#endif /* JEMALLOC_INTERNAL_TSD_H */