#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats->*
 */
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
static ctl_stats_t *ctl_stats;
static ctl_arenas_t *ctl_arenas;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node) {
	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index) {
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node) {
	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define CTL_PROTO(n) \
static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
    const size_t *mib, size_t miblen, size_t i);
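
/*
 * For illustration (mechanical macro expansion, nothing assumed):
 * CTL_PROTO(version) declares
 *
 *   static int version_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *       void *oldp, size_t *oldlenp, void *newp, size_t newlen);
 *
 * so every leaf handler shares one signature: read the current value out
 * through oldp/oldlenp, and/or write a new value in from newp/newlen.
 */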

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_slab_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_dirty_decay_ms)
CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_create)
CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_dirty_decay_ms)
CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)

#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
CTL_PROTO(stats_##n##_num_wait) \
CTL_PROTO(stats_##n##_num_spin_acq) \
CTL_PROTO(stats_##n##_num_owner_switch) \
CTL_PROTO(stats_##n##_total_wait_time) \
CTL_PROTO(stats_##n##_max_wait_time) \
CTL_PROTO(stats_##n##_max_num_thds)

/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN

CTL_PROTO(stats_mutexes_reset)

/******************************************************************************/
/* mallctl tree. */

#define NAME(n) {true}, n
#define CHILD(t, c) \
	sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
	(ctl_node_t *)c##_node, \
	NULL
#define CTL(c) 0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i) {false}, i##_index
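
/*
 * For illustration (literal expansion of the macros above): a leaf entry
 * such as {NAME("epoch"), CTL(epoch)} becomes
 *
 *   {{true}, "epoch", 0, NULL, epoch_ctl}
 *
 * while an interior entry such as {NAME("thread"), CHILD(named, thread)}
 * records a child count and a pointer to thread_node instead of a ctl
 * callback.
 */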

static const ctl_named_node_t thread_tcache_node[] = {
	{NAME("enabled"), CTL(thread_tcache_enabled)},
	{NAME("flush"), CTL(thread_tcache_flush)}
};

static const ctl_named_node_t thread_prof_node[] = {
	{NAME("name"), CTL(thread_prof_name)},
	{NAME("active"), CTL(thread_prof_active)}
};

static const ctl_named_node_t thread_node[] = {
	{NAME("arena"), CTL(thread_arena)},
	{NAME("allocated"), CTL(thread_allocated)},
	{NAME("allocatedp"), CTL(thread_allocatedp)},
	{NAME("deallocated"), CTL(thread_deallocated)},
	{NAME("deallocatedp"), CTL(thread_deallocatedp)},
	{NAME("tcache"), CHILD(named, thread_tcache)},
	{NAME("prof"), CHILD(named, thread_prof)}
};

static const ctl_named_node_t config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"), CTL(config_debug)},
	{NAME("fill"), CTL(config_fill)},
	{NAME("lazy_lock"), CTL(config_lazy_lock)},
	{NAME("malloc_conf"), CTL(config_malloc_conf)},
	{NAME("prof"), CTL(config_prof)},
	{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"), CTL(config_stats)},
	{NAME("utrace"), CTL(config_utrace)},
	{NAME("xmalloc"), CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"), CTL(opt_abort)},
	{NAME("abort_conf"), CTL(opt_abort_conf)},
	{NAME("metadata_thp"), CTL(opt_metadata_thp)},
	{NAME("retain"), CTL(opt_retain)},
	{NAME("dss"), CTL(opt_dss)},
	{NAME("narenas"), CTL(opt_narenas)},
	{NAME("percpu_arena"), CTL(opt_percpu_arena)},
	{NAME("background_thread"), CTL(opt_background_thread)},
	{NAME("max_background_threads"), CTL(opt_max_background_threads)},
	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"), CTL(opt_stats_print)},
	{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
	{NAME("junk"), CTL(opt_junk)},
	{NAME("zero"), CTL(opt_zero)},
	{NAME("utrace"), CTL(opt_utrace)},
	{NAME("xmalloc"), CTL(opt_xmalloc)},
	{NAME("tcache"), CTL(opt_tcache)},
	{NAME("thp"), CTL(opt_thp)},
	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
	{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
	{NAME("prof"), CTL(opt_prof)},
	{NAME("prof_prefix"), CTL(opt_prof_prefix)},
	{NAME("prof_active"), CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"), CTL(opt_prof_gdump)},
	{NAME("prof_final"), CTL(opt_prof_final)},
	{NAME("prof_leak"), CTL(opt_prof_leak)},
	{NAME("prof_accum"), CTL(opt_prof_accum)}
};

static const ctl_named_node_t tcache_node[] = {
	{NAME("create"), CTL(tcache_create)},
	{NAME("flush"), CTL(tcache_flush)},
	{NAME("destroy"), CTL(tcache_destroy)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("initialized"), CTL(arena_i_initialized)},
	{NAME("decay"), CTL(arena_i_decay)},
	{NAME("purge"), CTL(arena_i_purge)},
	{NAME("reset"), CTL(arena_i_reset)},
	{NAME("destroy"), CTL(arena_i_destroy)},
	{NAME("dss"), CTL(arena_i_dss)},
	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
	{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
	{NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""), CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"), CTL(arenas_bin_i_size)},
	{NAME("nregs"), CTL(arenas_bin_i_nregs)},
	{NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""), CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lextent_i_node[] = {
	{NAME("size"), CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
	{NAME(""), CHILD(named, arenas_lextent_i)}
};

static const ctl_indexed_node_t arenas_lextent_node[] = {
	{INDEX(arenas_lextent_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"), CTL(arenas_narenas)},
	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
	{NAME("quantum"), CTL(arenas_quantum)},
	{NAME("page"), CTL(arenas_page)},
	{NAME("tcache_max"), CTL(arenas_tcache_max)},
	{NAME("nbins"), CTL(arenas_nbins)},
	{NAME("nhbins"), CTL(arenas_nhbins)},
	{NAME("bin"), CHILD(indexed, arenas_bin)},
	{NAME("nlextents"), CTL(arenas_nlextents)},
	{NAME("lextent"), CHILD(indexed, arenas_lextent)},
	{NAME("create"), CTL(arenas_create)},
	{NAME("lookup"), CTL(arenas_lookup)}
};

static const ctl_named_node_t prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"), CTL(prof_active)},
	{NAME("dump"), CTL(prof_dump)},
	{NAME("gdump"), CTL(prof_gdump)},
	{NAME("reset"), CTL(prof_reset)},
	{NAME("interval"), CTL(prof_interval)},
	{NAME("lg_sample"), CTL(lg_prof_sample)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};

#define MUTEX_PROF_DATA_NODE(prefix) \
static const ctl_named_node_t stats_##prefix##_node[] = { \
	{NAME("num_ops"), \
	    CTL(stats_##prefix##_num_ops)}, \
	{NAME("num_wait"), \
	    CTL(stats_##prefix##_num_wait)}, \
	{NAME("num_spin_acq"), \
	    CTL(stats_##prefix##_num_spin_acq)}, \
	{NAME("num_owner_switch"), \
	    CTL(stats_##prefix##_num_owner_switch)}, \
	{NAME("total_wait_time"), \
	    CTL(stats_##prefix##_total_wait_time)}, \
	{NAME("max_wait_time"), \
	    CTL(stats_##prefix##_max_wait_time)}, \
	{NAME("max_num_thds"), \
	    CTL(stats_##prefix##_max_num_thds)} \
	/* Note that the number of currently waiting threads is not provided. */ \
};

MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
	{NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
	{NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
	{NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};

static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
	{NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
	{INDEX(stats_arenas_i_lextents_j)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
	{NAME("uptime"), CTL(stats_arenas_i_uptime)},
	{NAME("dss"), CTL(stats_arenas_i_dss)},
	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
	{NAME("pactive"), CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
	{NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
	{NAME("mapped"), CTL(stats_arenas_i_mapped)},
	{NAME("retained"), CTL(stats_arenas_i_retained)},
	{NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
	{NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
	{NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
	{NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"), CTL(stats_arenas_i_base)},
	{NAME("internal"), CTL(stats_arenas_i_internal)},
	{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
	{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
	{NAME("resident"), CTL(stats_arenas_i_resident)},
	{NAME("small"), CHILD(named, stats_arenas_i_small)},
	{NAME("large"), CHILD(named, stats_arenas_i_large)},
	{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
	{NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_background_thread_node[] = {
	{NAME("num_threads"), CTL(stats_background_thread_num_threads)},
	{NAME("num_runs"), CTL(stats_background_thread_num_runs)},
	{NAME("run_interval"), CTL(stats_background_thread_run_interval)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	{NAME("reset"), CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE

static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"), CTL(stats_allocated)},
	{NAME("active"), CTL(stats_active)},
	{NAME("metadata"), CTL(stats_metadata)},
	{NAME("metadata_thp"), CTL(stats_metadata_thp)},
	{NAME("resident"), CTL(stats_resident)},
	{NAME("mapped"), CTL(stats_mapped)},
	{NAME("retained"), CTL(stats_retained)},
	{NAME("background_thread"),
	    CHILD(named, stats_background_thread)},
	{NAME("mutexes"), CHILD(named, stats_mutexes)},
	{NAME("arenas"), CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t root_node[] = {
	{NAME("version"), CTL(version)},
	{NAME("epoch"), CTL(epoch)},
	{NAME("background_thread"), CTL(background_thread)},
	{NAME("max_background_threads"), CTL(max_background_threads)},
	{NAME("thread"), CHILD(named, thread)},
	{NAME("config"), CHILD(named, config)},
	{NAME("opt"), CHILD(named, opt)},
	{NAME("tcache"), CHILD(named, tcache)},
	{NAME("arena"), CHILD(indexed, arena)},
	{NAME("arenas"), CHILD(named, arenas)},
	{NAME("prof"), CHILD(named, prof)},
	{NAME("stats"), CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""), CHILD(named, root)}
};

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

/******************************************************************************/

/*
 * Accumulates *src into *dst non-atomically.  This is safe, since everything
 * is synchronized by the ctl mutex.
 */
static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
	atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
	*dst += *src;
#endif
}

/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	return *p;
#endif
}

static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
}

/******************************************************************************/

static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate) {
	unsigned a;

	switch (i) {
	case MALLCTL_ARENAS_ALL:
		a = 0;
		break;
	case MALLCTL_ARENAS_DESTROYED:
		a = 1;
		break;
	default:
		if (compat && i == ctl_arenas->narenas) {
			/*
			 * Provide deprecated backward compatibility for
			 * accessing the merged stats at index narenas rather
			 * than via MALLCTL_ARENAS_ALL.  This is scheduled for
			 * removal in 6.0.0.
			 */
			a = 0;
		} else if (validate && i >= ctl_arenas->narenas) {
			a = UINT_MAX;
		} else {
			/*
			 * This function should never be called for an index
			 * more than one past the range of indices that have
			 * initialized ctl data.
			 */
			assert(i < ctl_arenas->narenas || (!validate && i ==
			    ctl_arenas->narenas));
			a = (unsigned)i + 2;
		}
		break;
	}

	return a;
}
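
/*
 * For illustration, the resulting index mapping is:
 *
 *   mallctl arena index i      internal slot arenas_i2a_impl(i, ...)
 *   MALLCTL_ARENAS_ALL         0  (merged stats across all arenas)
 *   MALLCTL_ARENAS_DESTROYED   1  (accumulated stats of destroyed arenas)
 *   0 .. narenas-1             i + 2
 */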

static unsigned
arenas_i2a(size_t i) {
	return arenas_i2a_impl(i, true, false);
}

static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
	ctl_arena_t *ret;

	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			struct container_s {
				ctl_arena_t ctl_arena;
				ctl_arena_stats_t astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
			    b0get(), sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return ret;
}

static ctl_arena_t *
arenas_i(size_t i) {
	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
	assert(ret != NULL);
	return ret;
}

static void
ctl_arena_clear(ctl_arena_t *ctl_arena) {
	ctl_arena->nthreads = 0;
	ctl_arena->dss = dss_prec_names[dss_prec_limit];
	ctl_arena->dirty_decay_ms = -1;
	ctl_arena->muzzy_decay_ms = -1;
	ctl_arena->pactive = 0;
	ctl_arena->pdirty = 0;
	ctl_arena->pmuzzy = 0;
	if (config_stats) {
		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
		ctl_arena->astats->allocated_small = 0;
		ctl_arena->astats->nmalloc_small = 0;
		ctl_arena->astats->ndalloc_small = 0;
		ctl_arena->astats->nrequests_small = 0;
		memset(ctl_arena->astats->bstats, 0, NBINS *
		    sizeof(bin_stats_t));
		memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
		    sizeof(arena_stats_large_t));
	}
}

static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats);

		for (i = 0; i < NBINS; i++) {
			ctl_arena->astats->allocated_small +=
			    ctl_arena->astats->bstats[i].curregs *
			    sz_index2size(i);
			ctl_arena->astats->nmalloc_small +=
			    ctl_arena->astats->bstats[i].nmalloc;
			ctl_arena->astats->ndalloc_small +=
			    ctl_arena->astats->bstats[i].ndalloc;
			ctl_arena->astats->nrequests_small +=
			    ctl_arena->astats->bstats[i].nrequests;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
	}
}

static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.mapped,
			    &astats->astats.mapped);
			accum_atomic_zu(&sdstats->astats.retained,
			    &astats->astats.retained);
		}

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
		    &astats->astats.decay_dirty.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
		    &astats->astats.decay_dirty.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
		    &astats->astats.decay_dirty.purged);

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
		    &astats->astats.decay_muzzy.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
		    &astats->astats.decay_muzzy.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
		    &astats->astats.decay_muzzy.purged);

#define OP(mtx) malloc_mutex_prof_merge( \
	    &(sdstats->astats.mutex_prof_data[ \
	        arena_prof_mutex_##mtx]), \
	    &(astats->astats.mutex_prof_data[ \
	        arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.base,
			    &astats->astats.base);
			accum_atomic_zu(&sdstats->astats.internal,
			    &astats->astats.internal);
			accum_atomic_zu(&sdstats->astats.resident,
			    &astats->astats.resident);
			accum_atomic_zu(&sdstats->astats.metadata_thp,
			    &astats->astats.metadata_thp);
		} else {
			assert(atomic_load_zu(
			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
		}

		if (!destroyed) {
			sdstats->allocated_small += astats->allocated_small;
		} else {
			assert(astats->allocated_small == 0);
		}
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.allocated_large,
			    &astats->astats.allocated_large);
		} else {
			assert(atomic_load_zu(&astats->astats.allocated_large,
			    ATOMIC_RELAXED) == 0);
		}
		ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
		    &astats->astats.nmalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
		    &astats->astats.ndalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
		    &astats->astats.nrequests_large);

		accum_atomic_zu(&sdstats->astats.tcache_bytes,
		    &astats->astats.tcache_bytes);

		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}

		for (i = 0; i < NBINS; i++) {
			sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sdstats->bstats[i].nrequests +=
			    astats->bstats[i].nrequests;
			if (!destroyed) {
				sdstats->bstats[i].curregs +=
				    astats->bstats[i].curregs;
			} else {
				assert(astats->bstats[i].curregs == 0);
			}
			sdstats->bstats[i].nfills += astats->bstats[i].nfills;
			sdstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
			sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
			sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
			if (!destroyed) {
				sdstats->bstats[i].curslabs +=
				    astats->bstats[i].curslabs;
			} else {
				assert(astats->bstats[i].curslabs == 0);
			}
			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
			    &astats->bstats[i].mutex_data);
		}

		for (i = 0; i < NSIZES - NBINS; i++) {
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		}
	}
}

static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}

static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		arena_ind = ctl_arenas->narenas;
	}

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
		return UINT_MAX;
	}

	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}

	return arena_ind;
}

static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
	background_thread_stats_t *stats = &ctl_stats->background_thread;
	if (!have_background_thread ||
	    background_thread_stats_read(tsdn, stats)) {
		memset(stats, 0, sizeof(background_thread_stats_t));
		nstime_init(&stats->run_interval, 0);
	}
}

static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since ctl_arena_refresh() will merge into them.
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
		    ATOMIC_RELAXED);
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = atomic_load_zu(
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
		    ATOMIC_RELAXED);
		ctl_stats->metadata_thp = atomic_load_zu(
		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
		    &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
		ctl_stats->retained = atomic_load_zu(
		    &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

		ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
    malloc_mutex_lock(tsdn, &mtx); \
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
    malloc_mutex_unlock(tsdn, &mtx);

		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
			    bt2gctx_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We own ctl mutex already. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}

static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL) {
						nodesp[i] =
						    (const ctl_node_t *)node;
					}
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL) {
				nodesp[i] = (const ctl_node_t *)node;
			}
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return ret;
}
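
/*
 * For illustration (a sketch of one lookup): resolving the name
 * "arenas.bin.0.size" starts at super_root_node, matches the named children
 * "arenas" and "bin", consumes "0" through the indexed node's
 * arenas_bin_i_index() callback, and terminates at the "size" leaf.  On
 * return, *depthp == 4 and mibp holds the positions of "arenas", "bin", and
 * "size" within their parent nodes, with the literal index 0 in the third
 * slot.
 */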

int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
	if (ret != 0) {
		goto label_return;
	}

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl) {
		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
	} else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return ret;
}

int
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
	return ret;
}

int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl) {
		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	} else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return ret;
}
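
/*
 * These entry points back the public mallctl(), mallctlnametomib(), and
 * mallctlbymib() functions.  A typical caller-side pattern (a sketch using
 * the public API; error handling omitted, and nbins assumed to have been
 * read from "arenas.nbins") translates a name once and reuses the MIB with
 * one varying component:
 *
 *   size_t mib[4], miblen = 4, len;
 *   mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *   for (unsigned i = 0; i < nbins; i++) {
 *           size_t bin_size;
 *           mib[2] = i;
 *           len = sizeof(bin_size);
 *           mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *   }
 */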

bool
ctl_boot(void) {
	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	ctl_initialized = false;

	return false;
}

void
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

void
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

void
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define READONLY() do { \
	if (newp != NULL || newlen != 0) { \
		ret = EPERM; \
		goto label_return; \
	} \
} while (0)

#define WRITEONLY() do { \
	if (oldp != NULL || oldlenp != NULL) { \
		ret = EPERM; \
		goto label_return; \
	} \
} while (0)

#define READ_XOR_WRITE() do { \
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
	    newlen != 0)) { \
		ret = EPERM; \
		goto label_return; \
	} \
} while (0)

#define READ(v, t) do { \
	if (oldp != NULL && oldlenp != NULL) { \
		if (*oldlenp != sizeof(t)) { \
			size_t copylen = (sizeof(t) <= *oldlenp) \
			    ? sizeof(t) : *oldlenp; \
			memcpy(oldp, (void *)&(v), copylen); \
			ret = EINVAL; \
			goto label_return; \
		} \
		*(t *)oldp = (v); \
	} \
} while (0)

#define WRITE(v, t) do { \
	if (newp != NULL) { \
		if (newlen != sizeof(t)) { \
			ret = EINVAL; \
			goto label_return; \
		} \
		(v) = *(t *)newp; \
	} \
} while (0)

#define MIB_UNSIGNED(v, i) do { \
	if (mib[i] > UINT_MAX) { \
		ret = EFAULT; \
		goto label_return; \
	} \
	v = (unsigned)mib[i]; \
} while (0)
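
/*
 * For illustration of the macro semantics in a leaf handler (a summary, not
 * new behavior): READ(oldval, bool) copies oldval out through oldp only when
 * the caller supplied both oldp and oldlenp, failing with EINVAL (after a
 * truncated copy) if *oldlenp != sizeof(bool); WRITE(newval, bool) fills
 * newval from newp only when newp != NULL, failing with EINVAL unless
 * newlen == sizeof(bool).  READONLY()/WRITEONLY() reject the opposite
 * direction with EPERM.
 */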

/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
\
	if (!(c)) { \
		return ENOENT; \
	} \
	if (l) { \
		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
	} \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	if (l) { \
		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	} \
	return ret; \
}

#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
\
	if (!(c)) { \
		return ENOENT; \
	} \
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	return ret; \
}

#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
\
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	return ret; \
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
\
	if (!(c)) { \
		return ENOENT; \
	} \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	return ret; \
}

#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
\
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	return ret; \
}
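
/*
 * For illustration (literal expansion): CTL_RO_NL_GEN(opt_abort, opt_abort,
 * bool) generates
 *
 *   static int
 *   opt_abort_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 *       size_t *oldlenp, void *newp, size_t newlen) {
 *           int ret;
 *           bool oldval;
 *
 *           READONLY();
 *           oldval = (opt_abort);
 *           READ(oldval, bool);
 *
 *           ret = 0;
 *   label_return:
 *           return ret;
 *   }
 */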

#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
\
	if (!(c)) { \
		return ENOENT; \
	} \
	READONLY(); \
	oldval = (m(tsd)); \
	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	return ret; \
}

#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
\
	READONLY(); \
	oldval = n; \
	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	return ret; \
}

/******************************************************************************/

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL) {
		ctl_refresh(tsd_tsdn(tsd));
	}
	READ(ctl_arenas->epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
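
/*
 * Usage note (a sketch via the public API; error handling omitted): stats
 * are only recomputed when the epoch is advanced, so consumers write to
 * "epoch" before reading any stats.* values:
 *
 *   uint64_t epoch = 1;
 *   size_t len = sizeof(epoch);
 *   mallctl("epoch", &epoch, &len, &epoch, len);
 */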

static int
background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = background_thread_enabled();
		READ(oldval, bool);
	} else {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = background_thread_enabled();
		READ(oldval, bool);

		bool newval = *(bool *)newp;
		if (newval == oldval) {
			ret = 0;
			goto label_return;
		}

		background_thread_enabled_set(tsd_tsdn(tsd), newval);
		if (newval) {
			if (!can_enable_background_thread) {
				malloc_printf("<jemalloc>: Error in dlsym("
				    "RTLD_NEXT, \"pthread_create\"). Cannot "
				    "enable background_thread\n");
				ret = EFAULT;
				goto label_return;
			}
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}

static int
max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	size_t oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = max_background_threads;
		READ(oldval, size_t);
	} else {
		if (newlen != sizeof(size_t)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = max_background_threads;
		READ(oldval, size_t);

		size_t newval = *(size_t *)newp;
		if (newval == oldval) {
			ret = 0;
			goto label_return;
		}
		if (newval > opt_max_background_threads) {
			ret = EINVAL;
			goto label_return;
		}

		if (background_thread_enabled()) {
			if (!can_enable_background_thread) {
				malloc_printf("<jemalloc>: Error in dlsym("
				    "RTLD_NEXT, \"pthread_create\"). Cannot "
				    "enable background_thread\n");
				ret = EFAULT;
				goto label_return;
			}
			background_thread_enabled_set(tsd_tsdn(tsd), false);
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
			max_background_threads = newval;
			background_thread_enabled_set(tsd_tsdn(tsd), true);
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			max_background_threads = newval;
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}

/******************************************************************************/

CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
    const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
    const char *)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
    size_t)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)

/******************************************************************************/

static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	arena_t *oldarena;
	unsigned newind, oldind;

	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL) {
		return EAGAIN;
	}
	newind = oldind = arena_ind_get(oldarena);
	WRITE(newind, unsigned);
	READ(oldind, unsigned);

	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= narenas_total_get()) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		if (have_percpu_arena &&
		    PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
			if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
				/*
				 * If per-CPU arena is enabled, thread_arena
				 * control is not allowed for the auto arena
				 * range.
				 */
1724 ret = EPERM;
1725 goto label_return;
1726 }
1727 }
1728
1729 /* Initialize arena if necessary. */
1730 newarena = arena_get(tsd_tsdn(tsd), newind, true);
1731 if (newarena == NULL) {
1732 ret = EAGAIN;
1733 goto label_return;
1734 }
1735 /* Set new arena/tcache associations. */
1736 arena_migrate(tsd, oldind, newind);
1737 if (tcache_available(tsd)) {
1738 tcache_arena_reassociate(tsd_tsdn(tsd),
1739 tsd_tcachep_get(tsd), newarena);
1740 }
1741 }
1742
1743 ret = 0;
1744 label_return:
1745 return ret;
1746 }

CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
    uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
    tsd_thread_deallocatedp_get, uint64_t *)

static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	oldval = tcache_enabled_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(tsd, *(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
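
/*
 * Illustrative sketch: toggling the per-thread cache through the
 * "thread.tcache.enabled" mallctl handled above (bool old/new exchange):
 *
 *	bool was_enabled, enable = false;
 *	size_t sz = sizeof(was_enabled);
 *	mallctl("thread.tcache.enabled", &was_enabled, &sz, &enable,
 *	    sizeof(enable));
 */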

static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!tcache_available(tsd)) {
		ret = EFAULT;
		goto label_return;
	}

	READONLY();
	WRITEONLY();

	tcache_flush(tsd);

	ret = 0;
label_return:
	return ret;
}
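
/*
 * Illustrative sketch: READONLY() plus WRITEONLY() above reject both a
 * non-NULL old buffer and a non-NULL new buffer, so a flush is requested
 * with all-NULL arguments:
 *
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */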

static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!config_prof) {
		return ENOENT;
	}

	READ_XOR_WRITE();

	if (newp != NULL) {
		if (newlen != sizeof(const char *)) {
			ret = EINVAL;
			goto label_return;
		}

		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
		    0) {
			goto label_return;
		}
	} else {
		const char *oldname = prof_thread_name_get(tsd);
		READ(oldname, const char *);
	}

	ret = 0;
label_return:
	return ret;
}

static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	oldval = prof_thread_active_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		if (prof_thread_active_set(tsd, *(bool *)newp)) {
			ret = EAGAIN;
			goto label_return;
		}
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
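
/*
 * Illustrative sketch: READ_XOR_WRITE() makes "thread.prof.name" a
 * one-direction-per-call control, while "thread.prof.active" follows the
 * usual read-old/write-new pattern ("worker-0" is a hypothetical name):
 *
 *	const char *name = "worker-0";
 *	mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
 *
 *	bool active = true;
 *	mallctl("thread.prof.active", NULL, NULL, &active, sizeof(active));
 */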

/******************************************************************************/

static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	READONLY();
	if (tcaches_create(tsd, &tcache_ind)) {
		ret = EFAULT;
		goto label_return;
	}
	READ(tcache_ind, unsigned);

	ret = 0;
label_return:
	return ret;
}

static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_flush(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}

static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_destroy(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}
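
/*
 * Illustrative sketch of the explicit tcache life cycle driven by the three
 * ctls above; mallocx(), dallocx(), and MALLOCX_TCACHE() are public jemalloc
 * API:
 *
 *	unsigned tci;
 *	size_t sz = sizeof(tci);
 *	mallctl("tcache.create", &tci, &sz, NULL, 0);
 *	void *p = mallocx(64, MALLOCX_TCACHE(tci));
 *	dallocx(p, MALLOCX_TCACHE(tci));
 *	mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
 *	mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
 */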

/******************************************************************************/

static int
arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);
	unsigned arena_ind;
	bool initialized;

	READONLY();
	MIB_UNSIGNED(arena_ind, 1);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	initialized = arenas_i(arena_ind)->initialized;
	malloc_mutex_unlock(tsdn, &ctl_mtx);

	READ(initialized, bool);

	ret = 0;
label_return:
	return ret;
}

static void
arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
	malloc_mutex_lock(tsdn, &ctl_mtx);
	{
		unsigned narenas = ctl_arenas->narenas;

		/*
		 * Access via index narenas is deprecated, and scheduled for
		 * removal in 6.0.0.
		 */
		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
			unsigned i;
			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

			for (i = 0; i < narenas; i++) {
				tarenas[i] = arena_get(tsdn, i, false);
			}

			/*
			 * No further need to hold ctl_mtx, since narenas and
			 * tarenas contain everything needed below.
			 */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL) {
					arena_decay(tsdn, tarenas[i], false,
					    all);
				}
			}
		} else {
			arena_t *tarena;

			assert(arena_ind < narenas);

			tarena = arena_get(tsdn, arena_ind, false);

			/* No further need to hold ctl_mtx. */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			if (tarena != NULL) {
				arena_decay(tsdn, tarena, false, all);
			}
		}
	}
}

static int
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, false);

	ret = 0;
label_return:
	return ret;
}

static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, true);

	ret = 0;
label_return:
	return ret;
}
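
/*
 * Illustrative sketch: both ctls above funnel into arena_i_decay(); "purge"
 * (all == true) discards all unused dirty/muzzy pages, while "decay" only
 * runs the regular time-based pass.  The arena index is encoded in the name,
 * with MALLCTL_ARENAS_ALL addressing every arena:
 *
 *	char cmd[64];
 *	snprintf(cmd, sizeof(cmd), "arena.%u.purge",
 *	    (unsigned)MALLCTL_ARENAS_ALL);
 *	mallctl(cmd, NULL, NULL, NULL, 0);
 */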

static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
    arena_t **arena) {
	int ret;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(*arena_ind, 1);

	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
	if (*arena == NULL || arena_is_auto(*arena)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}

static void
arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
	/* Temporarily disable the background thread during arena reset. */
	if (have_background_thread) {
		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
		if (background_thread_enabled()) {
			unsigned ind = arena_ind % ncpus;
			background_thread_info_t *info =
			    &background_thread_info[ind];
			assert(info->state == background_thread_started);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_paused;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
}

static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
	if (have_background_thread) {
		if (background_thread_enabled()) {
			unsigned ind = arena_ind % ncpus;
			background_thread_info_t *info =
			    &background_thread_info[ind];
			assert(info->state == background_thread_paused);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_started;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	}
}

static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		return ret;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	arena_reset(tsd, arena);
	arena_reset_finish_background_thread(tsd, arena_ind);

	return ret;
}

static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;
	ctl_arena_t *ctl_darena, *ctl_arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		goto label_return;
	}

	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
	    true) != 0) {
		ret = EFAULT;
		goto label_return;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	/* Merge stats after resetting and purging arena. */
	arena_reset(tsd, arena);
	arena_decay(tsd_tsdn(tsd), arena, false, true);
	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
	ctl_darena->initialized = true;
	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
	/* Destroy arena. */
	arena_destroy(tsd, arena);
	ctl_arena = arenas_i(arena_ind);
	ctl_arena->initialized = false;
	/* Record arena index for later recycling via arenas.create. */
	ql_elm_new(ctl_arena, destroyed_link);
	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
	arena_reset_finish_background_thread(tsd, arena_ind);

	assert(ret == 0);
label_return:
	return ret;
}
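
/*
 * Illustrative sketch: destroying a manually created arena (the helper above
 * rejects auto arenas, and arenas with attached threads fail with EFAULT);
 * per the bookkeeping above, the freed index is recycled by a later
 * "arenas.create".  arena_ind here stands for a previously obtained index:
 *
 *	char cmd[64];
 *	snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena_ind);
 *	mallctl(cmd, NULL, NULL, NULL, 0);
 */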

static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *dss = NULL;
	unsigned arena_ind;
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(dss, const char *);
	MIB_UNSIGNED(arena_ind, 1);
	if (dss != NULL) {
		int i;
		bool match = false;

		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}

		if (!match) {
			ret = EINVAL;
			goto label_return;
		}
	}

	/*
	 * Access via index narenas is deprecated, and scheduled for removal in
	 * 6.0.0.
	 */
	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
	    ctl_arenas->narenas) {
		if (dss_prec != dss_prec_limit &&
		    extent_dss_prec_set(dss_prec)) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = extent_dss_prec_get();
	} else {
		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL || (dss_prec != dss_prec_limit &&
		    arena_dss_prec_set(arena, dss_prec))) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = arena_dss_prec_get(arena);
	}

	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
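
/*
 * Illustrative sketch: the dss setting is exchanged as a const char * naming
 * one of dss_prec_names ("disabled", "primary", "secondary"); arena 0 is a
 * hypothetical target:
 *
 *	const char *old_dss, *new_dss = "primary";
 *	size_t sz = sizeof(old_dss);
 *	mallctl("arena.0.dss", &old_dss, &sz, &new_dss, sizeof(new_dss));
 */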

static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	MIB_UNSIGNED(arena_ind, 1);
	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
	if (arena == NULL) {
		ret = EFAULT;
		goto label_return;
	}

	if (oldp != NULL && oldlenp != NULL) {
		ssize_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
		    arena_muzzy_decay_ms_get(arena);
		READ(oldval, ssize_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
		    *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
		    arena, *(ssize_t *)newp)) {
			ret = EFAULT;
			goto label_return;
		}
	}

	ret = 0;
label_return:
	return ret;
}

static int
arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, true);
}

static int
arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, false);
}
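
/*
 * Illustrative sketch: decay times are ssize_t milliseconds, where -1
 * disables purging entirely and 0 requests purging as soon as possible
 * (arena 0 and 10 s are hypothetical values):
 *
 *	ssize_t ms = 10 * 1000;
 *	mallctl("arena.0.dirty_decay_ms", NULL, NULL, &ms, sizeof(ms));
 */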

static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get()) {
		extent_hooks_t *old_extent_hooks;
		arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL) {
			if (arena_ind >= narenas_auto) {
				ret = EFAULT;
				goto label_return;
			}
			old_extent_hooks =
			    (extent_hooks_t *)&extent_hooks_default;
			READ(old_extent_hooks, extent_hooks_t *);
			if (newp != NULL) {
				/* Initialize a new arena as a side effect. */
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				arena = arena_init(tsd_tsdn(tsd), arena_ind,
				    new_extent_hooks);
				if (arena == NULL) {
					ret = EFAULT;
					goto label_return;
				}
			}
		} else {
			if (newp != NULL) {
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				old_extent_hooks = extent_hooks_set(tsd, arena,
				    new_extent_hooks);
				READ(old_extent_hooks, extent_hooks_t *);
			} else {
				old_extent_hooks = extent_hooks_get(arena);
				READ(old_extent_hooks, extent_hooks_t *);
			}
		}
	} else {
		ret = EFAULT;
		goto label_return;
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
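
/*
 * Illustrative sketch: reading the current hook table; writing an
 * extent_hooks_t * through the same ctl installs custom hooks (and, per the
 * arena == NULL branch above, can initialize a not-yet-created arena as a
 * side effect).  Arena 0 is a hypothetical target:
 *
 *	extent_hooks_t *hooks;
 *	size_t sz = sizeof(hooks);
 *	mallctl("arena.0.extent_hooks", &hooks, &sz, NULL, 0);
 */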

static int
arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	if (!opt_retain) {
		/* Only relevant when retain is enabled. */
		return ENOENT;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
		size_t old_limit, new_limit;
		if (newp != NULL) {
			WRITE(new_limit, size_t);
		}
		bool err = arena_retain_grow_limit_get_set(tsd, arena,
		    &old_limit, newp != NULL ? &new_limit : NULL);
		if (!err) {
			READ(old_limit, size_t);
			ret = 0;
		} else {
			ret = EFAULT;
		}
	} else {
		ret = EFAULT;
	}
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}

static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	switch (i) {
	case MALLCTL_ARENAS_ALL:
	case MALLCTL_ARENAS_DESTROYED:
		break;
	default:
		if (i > ctl_arenas->narenas) {
			ret = NULL;
			goto label_return;
		}
		break;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

/******************************************************************************/

static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned narenas;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_arenas->narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}

static int
arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
	int ret;

	if (oldp != NULL && oldlenp != NULL) {
		ssize_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
		    arena_muzzy_decay_ms_default_get());
		READ(oldval, ssize_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
		    : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
			ret = EFAULT;
			goto label_return;
		}
	}

	ret = 0;
label_return:
	return ret;
}

static int
arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, true);
}

static int
arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, false);
}

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
	if (i > NBINS) {
		return NULL;
	}
	return super_arenas_bin_i_node;
}

CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
    size_t)
static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
	if (i > NSIZES - NBINS) {
		return NULL;
	}
	return super_arenas_lextent_i_node;
}

static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	extent_hooks_t *extent_hooks;
	unsigned arena_ind;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

	extent_hooks = (extent_hooks_t *)&extent_hooks_default;
	WRITE(extent_hooks, extent_hooks_t *);
	if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
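
/*
 * Illustrative sketch: creating an arena with the default extent hooks (no
 * new value written); the returned index is then used to build
 * "arena.<i>.*" names:
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.create", &ind, &sz, NULL, 0);
 */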

static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	void *ptr;
	extent_t *extent;
	arena_t *arena;

	ptr = NULL;
	ret = EINVAL;
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(ptr, void *);
	extent = iealloc(tsd_tsdn(tsd), ptr);
	if (extent == NULL) {
		goto label_return;
	}

	arena = extent_arena_get(extent);
	if (arena == NULL) {
		goto label_return;
	}

	arena_ind = arena_ind_get(arena);
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
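
/*
 * Illustrative sketch: mapping an allocation back to its owning arena; the
 * pointer itself is passed as the "new" value:
 *
 *	void *p = mallocx(42, 0);
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.lookup", &ind, &sz, &p, sizeof(p));
 */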

/******************************************************************************/

static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
		    *(bool *)newp);
	} else {
		oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
	} else {
		oldval = prof_active_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *filename = NULL;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_mdump(tsd, filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}
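
/*
 * Illustrative sketch: triggering a heap profile dump to an explicit file;
 * with newp == NULL the filename stays NULL above and prof_mdump() falls
 * back to the opt.prof_prefix-derived default name.  The path is
 * hypothetical:
 *
 *	const char *fname = "/tmp/app.heap";
 *	mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
 */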

static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
	} else {
		oldval = prof_gdump_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	size_t lg_sample = lg_prof_sample;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(lg_sample, size_t);
	if (lg_sample >= (sizeof(uint64_t) << 3)) {
		lg_sample = (sizeof(uint64_t) << 3) - 1;
	}

	prof_reset(tsd, lg_sample);

	ret = 0;
label_return:
	return ret;
}

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)

/******************************************************************************/

CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)

CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
    ctl_stats->background_thread.num_threads, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
    ctl_stats->background_thread.num_runs, uint64_t)
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
    nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
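
/*
 * Illustrative sketch: the ctl_stats-backed values above are refreshed only
 * by the "epoch" ctl, so a consistent read is a two-step sequence:
 *
 *	uint64_t epoch = 1;
 *	size_t esz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch));
 *
 *	size_t allocated, asz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &asz, NULL, 0);
 */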

CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_uptime,
    nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
    size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_base,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
    size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
/*
 * Note: "nmalloc" here instead of "nrequests" in the read.  This is
 * intentional.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */

/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_ops,				\
    l.n_lock_ops, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_wait,				\
    l.n_wait_times, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq,			\
    l.n_spin_acquired, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch,			\
    l.n_owner_switches, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time,			\
    nstime_ns(&l.tot_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time,			\
    nstime_ns(&l.max_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds,			\
    l.max_n_thds, uint32_t)

/* Global mutexes. */
#define OP(mtx)								\
    RO_MUTEX_CTL_GEN(mutexes_##mtx,					\
	ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx,		\
    arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* tcache bin mutex. */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN

/* Resets all mutex stats, including global, arena, and bin mutexes. */
static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);

#define MUTEX_PROF_RESET(mtx)						\
    malloc_mutex_lock(tsdn, &mtx);					\
    malloc_mutex_prof_data_reset(tsdn, &mtx);				\
    malloc_mutex_unlock(tsdn, &mtx);

	/* Global mutexes: ctl, background_thread, and prof. */
	MUTEX_PROF_RESET(ctl_mtx);
	if (have_background_thread) {
		MUTEX_PROF_RESET(background_thread_lock);
	}
	if (config_prof && opt_prof) {
		MUTEX_PROF_RESET(bt2gctx_mtx);
	}

	/* Per arena mutexes. */
	unsigned n = narenas_total_get();

	for (unsigned i = 0; i < n; i++) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		MUTEX_PROF_RESET(arena->large_mtx);
		MUTEX_PROF_RESET(arena->extent_avail_mtx);
		MUTEX_PROF_RESET(arena->extents_dirty.mtx);
		MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
		MUTEX_PROF_RESET(arena->extents_retained.mtx);
		MUTEX_PROF_RESET(arena->decay_dirty.mtx);
		MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
		MUTEX_PROF_RESET(arena->base->mtx);

		for (szind_t j = 0; j < NBINS; j++) {
			bin_t *bin = &arena->bins[j];
			MUTEX_PROF_RESET(bin->lock);
		}
	}
#undef MUTEX_PROF_RESET
	return 0;
}
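
/*
 * Illustrative sketch: the handler above ignores both the old and new
 * buffers, so a bare call resets every tracked mutex's profiling data:
 *
 *	mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
 */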

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)

static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
	if (j > NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_bins_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)

static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
	if (j > NSIZES - NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_lextents_j_node;
}

static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
	const ctl_named_node_t *ret;
	size_t a;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	a = arenas_i2a_impl(i, true, true);
	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}