1 #define	JEMALLOC_CTL_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 
4 /******************************************************************************/
5 /* Data. */
6 
7 /*
8  * ctl_mtx protects the following:
9  * - ctl_stats.*
10  * - opt_prof_active
11  */
12 static malloc_mutex_t	ctl_mtx;
13 static bool		ctl_initialized;
14 static uint64_t		ctl_epoch;
15 static ctl_stats_t	ctl_stats;
16 
17 /******************************************************************************/
18 /* Helpers for named and indexed nodes. */
19 
20 static inline const ctl_named_node_t *
21 ctl_named_node(const ctl_node_t *node)
22 {
23 
24 	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
25 }
26 
27 static inline const ctl_named_node_t *
28 ctl_named_children(const ctl_named_node_t *node, int index)
29 {
30 	const ctl_named_node_t *children = ctl_named_node(node->children);
31 
32 	return (children ? &children[index] : NULL);
33 }
34 
35 static inline const ctl_indexed_node_t *
36 ctl_indexed_node(const ctl_node_t *node)
37 {
38 
39 	return ((node->named == false) ? (const ctl_indexed_node_t *)node :
40 	    NULL);
41 }
42 
43 /******************************************************************************/
44 /* Function prototypes for non-inline static functions. */
45 
46 #define	CTL_PROTO(n)							\
47 static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
48     size_t *oldlenp, void *newp, size_t newlen);
49 
50 #define	INDEX_PROTO(n)							\
51 static const ctl_named_node_t	*n##_index(const size_t *mib,		\
52     size_t miblen, size_t i);
53 
54 static bool	ctl_arena_init(ctl_arena_stats_t *astats);
55 static void	ctl_arena_clear(ctl_arena_stats_t *astats);
56 static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
57     arena_t *arena);
58 static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
59     ctl_arena_stats_t *astats);
60 static void	ctl_arena_refresh(arena_t *arena, unsigned i);
61 static bool	ctl_grow(void);
62 static void	ctl_refresh(void);
63 static bool	ctl_init(void);
64 static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
65     size_t *mibp, size_t *depthp);
66 
67 CTL_PROTO(version)
68 CTL_PROTO(epoch)
69 CTL_PROTO(thread_tcache_enabled)
70 CTL_PROTO(thread_tcache_flush)
71 CTL_PROTO(thread_arena)
72 CTL_PROTO(thread_allocated)
73 CTL_PROTO(thread_allocatedp)
74 CTL_PROTO(thread_deallocated)
75 CTL_PROTO(thread_deallocatedp)
76 CTL_PROTO(config_debug)
77 CTL_PROTO(config_fill)
78 CTL_PROTO(config_lazy_lock)
79 CTL_PROTO(config_munmap)
80 CTL_PROTO(config_prof)
81 CTL_PROTO(config_prof_libgcc)
82 CTL_PROTO(config_prof_libunwind)
83 CTL_PROTO(config_stats)
84 CTL_PROTO(config_tcache)
85 CTL_PROTO(config_tls)
86 CTL_PROTO(config_utrace)
87 CTL_PROTO(config_valgrind)
88 CTL_PROTO(config_xmalloc)
89 CTL_PROTO(opt_abort)
90 CTL_PROTO(opt_dss)
91 CTL_PROTO(opt_lg_chunk)
92 CTL_PROTO(opt_narenas)
93 CTL_PROTO(opt_lg_dirty_mult)
94 CTL_PROTO(opt_stats_print)
95 CTL_PROTO(opt_junk)
96 CTL_PROTO(opt_zero)
97 CTL_PROTO(opt_quarantine)
98 CTL_PROTO(opt_redzone)
99 CTL_PROTO(opt_utrace)
100 CTL_PROTO(opt_xmalloc)
101 CTL_PROTO(opt_tcache)
102 CTL_PROTO(opt_lg_tcache_max)
103 CTL_PROTO(opt_prof)
104 CTL_PROTO(opt_prof_prefix)
105 CTL_PROTO(opt_prof_active)
106 CTL_PROTO(opt_lg_prof_sample)
107 CTL_PROTO(opt_lg_prof_interval)
108 CTL_PROTO(opt_prof_gdump)
109 CTL_PROTO(opt_prof_final)
110 CTL_PROTO(opt_prof_leak)
111 CTL_PROTO(opt_prof_accum)
112 CTL_PROTO(arena_i_purge)
113 static void	arena_purge(unsigned arena_ind);
114 CTL_PROTO(arena_i_dss)
115 CTL_PROTO(arena_i_chunk_alloc)
116 CTL_PROTO(arena_i_chunk_dalloc)
117 INDEX_PROTO(arena_i)
118 CTL_PROTO(arenas_bin_i_size)
119 CTL_PROTO(arenas_bin_i_nregs)
120 CTL_PROTO(arenas_bin_i_run_size)
121 INDEX_PROTO(arenas_bin_i)
122 CTL_PROTO(arenas_lrun_i_size)
123 INDEX_PROTO(arenas_lrun_i)
124 CTL_PROTO(arenas_narenas)
125 CTL_PROTO(arenas_initialized)
126 CTL_PROTO(arenas_quantum)
127 CTL_PROTO(arenas_page)
128 CTL_PROTO(arenas_tcache_max)
129 CTL_PROTO(arenas_nbins)
130 CTL_PROTO(arenas_nhbins)
131 CTL_PROTO(arenas_nlruns)
132 CTL_PROTO(arenas_extend)
133 CTL_PROTO(prof_active)
134 CTL_PROTO(prof_dump)
135 CTL_PROTO(prof_interval)
136 CTL_PROTO(stats_chunks_current)
137 CTL_PROTO(stats_chunks_total)
138 CTL_PROTO(stats_chunks_high)
139 CTL_PROTO(stats_arenas_i_small_allocated)
140 CTL_PROTO(stats_arenas_i_small_nmalloc)
141 CTL_PROTO(stats_arenas_i_small_ndalloc)
142 CTL_PROTO(stats_arenas_i_small_nrequests)
143 CTL_PROTO(stats_arenas_i_large_allocated)
144 CTL_PROTO(stats_arenas_i_large_nmalloc)
145 CTL_PROTO(stats_arenas_i_large_ndalloc)
146 CTL_PROTO(stats_arenas_i_large_nrequests)
147 CTL_PROTO(stats_arenas_i_huge_allocated)
148 CTL_PROTO(stats_arenas_i_huge_nmalloc)
149 CTL_PROTO(stats_arenas_i_huge_ndalloc)
150 CTL_PROTO(stats_arenas_i_huge_nrequests)
151 CTL_PROTO(stats_arenas_i_bins_j_allocated)
152 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
153 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
154 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
155 CTL_PROTO(stats_arenas_i_bins_j_nfills)
156 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
157 CTL_PROTO(stats_arenas_i_bins_j_nruns)
158 CTL_PROTO(stats_arenas_i_bins_j_nreruns)
159 CTL_PROTO(stats_arenas_i_bins_j_curruns)
160 INDEX_PROTO(stats_arenas_i_bins_j)
161 CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
162 CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
163 CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
164 CTL_PROTO(stats_arenas_i_lruns_j_curruns)
165 INDEX_PROTO(stats_arenas_i_lruns_j)
166 CTL_PROTO(stats_arenas_i_nthreads)
167 CTL_PROTO(stats_arenas_i_dss)
168 CTL_PROTO(stats_arenas_i_pactive)
169 CTL_PROTO(stats_arenas_i_pdirty)
170 CTL_PROTO(stats_arenas_i_mapped)
171 CTL_PROTO(stats_arenas_i_npurge)
172 CTL_PROTO(stats_arenas_i_nmadvise)
173 CTL_PROTO(stats_arenas_i_purged)
174 INDEX_PROTO(stats_arenas_i)
175 CTL_PROTO(stats_cactive)
176 CTL_PROTO(stats_allocated)
177 CTL_PROTO(stats_active)
178 CTL_PROTO(stats_mapped)
179 
180 /******************************************************************************/
181 /* mallctl tree. */
182 
183 /* Maximum tree depth. */
184 #define	CTL_MAX_DEPTH	6
185 
186 #define	NAME(n)	{true},	n
187 #define	CHILD(t, c)							\
188 	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
189 	(ctl_node_t *)c##_node,						\
190 	NULL
191 #define	CTL(c)	0, NULL, c##_ctl
192 
193 /*
194  * Only handles internal indexed nodes, since there are currently no external
195  * ones.
196  */
197 #define	INDEX(i)	{false},	i##_index
198 
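/*
 * Illustrative expansion (not part of the original source): the NAME/CTL/
 * CHILD macros above build ctl_named_node_t initializers, assuming the field
 * order declared in the internal ctl.h header ({named} header, name,
 * nchildren, children, ctl).  A terminal entry such as
 *
 *	{NAME("epoch"),		CTL(epoch)}
 *
 * expands to roughly
 *
 *	{{true}, "epoch", 0, NULL, epoch_ctl}
 *
 * and an interior entry such as
 *
 *	{NAME("thread"),	CHILD(named, thread)}
 *
 * expands to roughly
 *
 *	{{true}, "thread", sizeof(thread_node) / sizeof(ctl_named_node_t),
 *	    (ctl_node_t *)thread_node, NULL}
 */
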
199 static const ctl_named_node_t	tcache_node[] = {
200 	{NAME("enabled"),	CTL(thread_tcache_enabled)},
201 	{NAME("flush"),		CTL(thread_tcache_flush)}
202 };
203 
204 static const ctl_named_node_t	thread_node[] = {
205 	{NAME("arena"),		CTL(thread_arena)},
206 	{NAME("allocated"),	CTL(thread_allocated)},
207 	{NAME("allocatedp"),	CTL(thread_allocatedp)},
208 	{NAME("deallocated"),	CTL(thread_deallocated)},
209 	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
210 	{NAME("tcache"),	CHILD(named, tcache)}
211 };
212 
213 static const ctl_named_node_t	config_node[] = {
214 	{NAME("debug"),			CTL(config_debug)},
215 	{NAME("fill"),			CTL(config_fill)},
216 	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
217 	{NAME("munmap"),		CTL(config_munmap)},
218 	{NAME("prof"),			CTL(config_prof)},
219 	{NAME("prof_libgcc"),		CTL(config_prof_libgcc)},
220 	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
221 	{NAME("stats"),			CTL(config_stats)},
222 	{NAME("tcache"),		CTL(config_tcache)},
223 	{NAME("tls"),			CTL(config_tls)},
224 	{NAME("utrace"),		CTL(config_utrace)},
225 	{NAME("valgrind"),		CTL(config_valgrind)},
226 	{NAME("xmalloc"),		CTL(config_xmalloc)}
227 };
228 
229 static const ctl_named_node_t opt_node[] = {
230 	{NAME("abort"),			CTL(opt_abort)},
231 	{NAME("dss"),			CTL(opt_dss)},
232 	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
233 	{NAME("narenas"),		CTL(opt_narenas)},
234 	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
235 	{NAME("stats_print"),		CTL(opt_stats_print)},
236 	{NAME("junk"),			CTL(opt_junk)},
237 	{NAME("zero"),			CTL(opt_zero)},
238 	{NAME("quarantine"),		CTL(opt_quarantine)},
239 	{NAME("redzone"),		CTL(opt_redzone)},
240 	{NAME("utrace"),		CTL(opt_utrace)},
241 	{NAME("xmalloc"),		CTL(opt_xmalloc)},
242 	{NAME("tcache"),		CTL(opt_tcache)},
243 	{NAME("lg_tcache_max"),		CTL(opt_lg_tcache_max)},
244 	{NAME("prof"),			CTL(opt_prof)},
245 	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
246 	{NAME("prof_active"),		CTL(opt_prof_active)},
247 	{NAME("lg_prof_sample"),	CTL(opt_lg_prof_sample)},
248 	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
249 	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
250 	{NAME("prof_final"),		CTL(opt_prof_final)},
251 	{NAME("prof_leak"),		CTL(opt_prof_leak)},
252 	{NAME("prof_accum"),		CTL(opt_prof_accum)}
253 };
254 
255 static const ctl_named_node_t chunk_node[] = {
256 	{NAME("alloc"),			CTL(arena_i_chunk_alloc)},
257 	{NAME("dalloc"),		CTL(arena_i_chunk_dalloc)}
258 };
259 
260 static const ctl_named_node_t arena_i_node[] = {
261 	{NAME("purge"),			CTL(arena_i_purge)},
262 	{NAME("dss"),			CTL(arena_i_dss)},
263 	{NAME("chunk"),			CHILD(named, chunk)},
264 };
265 static const ctl_named_node_t super_arena_i_node[] = {
266 	{NAME(""),			CHILD(named, arena_i)}
267 };
268 
269 static const ctl_indexed_node_t arena_node[] = {
270 	{INDEX(arena_i)}
271 };
272 
273 static const ctl_named_node_t arenas_bin_i_node[] = {
274 	{NAME("size"),			CTL(arenas_bin_i_size)},
275 	{NAME("nregs"),			CTL(arenas_bin_i_nregs)},
276 	{NAME("run_size"),		CTL(arenas_bin_i_run_size)}
277 };
278 static const ctl_named_node_t super_arenas_bin_i_node[] = {
279 	{NAME(""),			CHILD(named, arenas_bin_i)}
280 };
281 
282 static const ctl_indexed_node_t arenas_bin_node[] = {
283 	{INDEX(arenas_bin_i)}
284 };
285 
286 static const ctl_named_node_t arenas_lrun_i_node[] = {
287 	{NAME("size"),			CTL(arenas_lrun_i_size)}
288 };
289 static const ctl_named_node_t super_arenas_lrun_i_node[] = {
290 	{NAME(""),			CHILD(named, arenas_lrun_i)}
291 };
292 
293 static const ctl_indexed_node_t arenas_lrun_node[] = {
294 	{INDEX(arenas_lrun_i)}
295 };
296 
297 static const ctl_named_node_t arenas_node[] = {
298 	{NAME("narenas"),		CTL(arenas_narenas)},
299 	{NAME("initialized"),		CTL(arenas_initialized)},
300 	{NAME("quantum"),		CTL(arenas_quantum)},
301 	{NAME("page"),			CTL(arenas_page)},
302 	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
303 	{NAME("nbins"),			CTL(arenas_nbins)},
304 	{NAME("nhbins"),		CTL(arenas_nhbins)},
305 	{NAME("bin"),			CHILD(indexed, arenas_bin)},
306 	{NAME("nlruns"),		CTL(arenas_nlruns)},
307 	{NAME("lrun"),			CHILD(indexed, arenas_lrun)},
308 	{NAME("extend"),		CTL(arenas_extend)}
309 };
310 
311 static const ctl_named_node_t	prof_node[] = {
312 	{NAME("active"),	CTL(prof_active)},
313 	{NAME("dump"),		CTL(prof_dump)},
314 	{NAME("interval"),	CTL(prof_interval)}
315 };
316 
317 static const ctl_named_node_t stats_chunks_node[] = {
318 	{NAME("current"),		CTL(stats_chunks_current)},
319 	{NAME("total"),			CTL(stats_chunks_total)},
320 	{NAME("high"),			CTL(stats_chunks_high)}
321 };
322 
323 static const ctl_named_node_t stats_arenas_i_small_node[] = {
324 	{NAME("allocated"),		CTL(stats_arenas_i_small_allocated)},
325 	{NAME("nmalloc"),		CTL(stats_arenas_i_small_nmalloc)},
326 	{NAME("ndalloc"),		CTL(stats_arenas_i_small_ndalloc)},
327 	{NAME("nrequests"),		CTL(stats_arenas_i_small_nrequests)}
328 };
329 
330 static const ctl_named_node_t stats_arenas_i_large_node[] = {
331 	{NAME("allocated"),		CTL(stats_arenas_i_large_allocated)},
332 	{NAME("nmalloc"),		CTL(stats_arenas_i_large_nmalloc)},
333 	{NAME("ndalloc"),		CTL(stats_arenas_i_large_ndalloc)},
334 	{NAME("nrequests"),		CTL(stats_arenas_i_large_nrequests)}
335 };
336 
337 static const ctl_named_node_t stats_arenas_i_huge_node[] = {
338 	{NAME("allocated"),		CTL(stats_arenas_i_huge_allocated)},
339 	{NAME("nmalloc"),		CTL(stats_arenas_i_huge_nmalloc)},
340 	{NAME("ndalloc"),		CTL(stats_arenas_i_huge_ndalloc)},
341 	{NAME("nrequests"),		CTL(stats_arenas_i_huge_nrequests)},
342 };
343 
344 static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
345 	{NAME("allocated"),		CTL(stats_arenas_i_bins_j_allocated)},
346 	{NAME("nmalloc"),		CTL(stats_arenas_i_bins_j_nmalloc)},
347 	{NAME("ndalloc"),		CTL(stats_arenas_i_bins_j_ndalloc)},
348 	{NAME("nrequests"),		CTL(stats_arenas_i_bins_j_nrequests)},
349 	{NAME("nfills"),		CTL(stats_arenas_i_bins_j_nfills)},
350 	{NAME("nflushes"),		CTL(stats_arenas_i_bins_j_nflushes)},
351 	{NAME("nruns"),			CTL(stats_arenas_i_bins_j_nruns)},
352 	{NAME("nreruns"),		CTL(stats_arenas_i_bins_j_nreruns)},
353 	{NAME("curruns"),		CTL(stats_arenas_i_bins_j_curruns)}
354 };
355 static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
356 	{NAME(""),			CHILD(named, stats_arenas_i_bins_j)}
357 };
358 
359 static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
360 	{INDEX(stats_arenas_i_bins_j)}
361 };
362 
363 static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
364 	{NAME("nmalloc"),		CTL(stats_arenas_i_lruns_j_nmalloc)},
365 	{NAME("ndalloc"),		CTL(stats_arenas_i_lruns_j_ndalloc)},
366 	{NAME("nrequests"),		CTL(stats_arenas_i_lruns_j_nrequests)},
367 	{NAME("curruns"),		CTL(stats_arenas_i_lruns_j_curruns)}
368 };
369 static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
370 	{NAME(""),			CHILD(named, stats_arenas_i_lruns_j)}
371 };
372 
373 static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
374 	{INDEX(stats_arenas_i_lruns_j)}
375 };
376 
377 static const ctl_named_node_t stats_arenas_i_node[] = {
378 	{NAME("nthreads"),		CTL(stats_arenas_i_nthreads)},
379 	{NAME("dss"),			CTL(stats_arenas_i_dss)},
380 	{NAME("pactive"),		CTL(stats_arenas_i_pactive)},
381 	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)},
382 	{NAME("mapped"),		CTL(stats_arenas_i_mapped)},
383 	{NAME("npurge"),		CTL(stats_arenas_i_npurge)},
384 	{NAME("nmadvise"),		CTL(stats_arenas_i_nmadvise)},
385 	{NAME("purged"),		CTL(stats_arenas_i_purged)},
386 	{NAME("small"),			CHILD(named, stats_arenas_i_small)},
387 	{NAME("large"),			CHILD(named, stats_arenas_i_large)},
388 	{NAME("huge"),			CHILD(named, stats_arenas_i_huge)},
389 	{NAME("bins"),			CHILD(indexed, stats_arenas_i_bins)},
390 	{NAME("lruns"),			CHILD(indexed, stats_arenas_i_lruns)}
391 };
392 static const ctl_named_node_t super_stats_arenas_i_node[] = {
393 	{NAME(""),			CHILD(named, stats_arenas_i)}
394 };
395 
396 static const ctl_indexed_node_t stats_arenas_node[] = {
397 	{INDEX(stats_arenas_i)}
398 };
399 
400 static const ctl_named_node_t stats_node[] = {
401 	{NAME("cactive"),		CTL(stats_cactive)},
402 	{NAME("allocated"),		CTL(stats_allocated)},
403 	{NAME("active"),		CTL(stats_active)},
404 	{NAME("mapped"),		CTL(stats_mapped)},
405 	{NAME("chunks"),		CHILD(named, stats_chunks)},
406 	{NAME("arenas"),		CHILD(indexed, stats_arenas)}
407 };
408 
409 static const ctl_named_node_t	root_node[] = {
410 	{NAME("version"),	CTL(version)},
411 	{NAME("epoch"),		CTL(epoch)},
412 	{NAME("thread"),	CHILD(named, thread)},
413 	{NAME("config"),	CHILD(named, config)},
414 	{NAME("opt"),		CHILD(named, opt)},
415 	{NAME("arena"),		CHILD(indexed, arena)},
416 	{NAME("arenas"),	CHILD(named, arenas)},
417 	{NAME("prof"),		CHILD(named, prof)},
418 	{NAME("stats"),		CHILD(named, stats)}
419 };
420 static const ctl_named_node_t super_root_node[] = {
421 	{NAME(""),		CHILD(named, root)}
422 };
423 
424 #undef NAME
425 #undef CHILD
426 #undef CTL
427 #undef INDEX
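
/*
 * Illustrative note (not part of the original source): ctl_lookup() resolves
 * a dotted name by walking the tables above starting at super_root_node.
 * For example, "thread.tcache.enabled" traverses
 *
 *	super_root_node[0] -> root_node ("thread") -> thread_node ("tcache")
 *	    -> tcache_node ("enabled") -> thread_tcache_enabled_ctl()
 *
 * and yields the MIB {2, 5, 0}, i.e. the indices of "thread", "tcache", and
 * "enabled" within their parent node arrays.
 */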
428 
429 /******************************************************************************/
430 
431 static bool
432 ctl_arena_init(ctl_arena_stats_t *astats)
433 {
434 
435 	if (astats->lstats == NULL) {
436 		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
437 		    sizeof(malloc_large_stats_t));
438 		if (astats->lstats == NULL)
439 			return (true);
440 	}
441 
442 	return (false);
443 }
444 
445 static void
446 ctl_arena_clear(ctl_arena_stats_t *astats)
447 {
448 
449 	astats->dss = dss_prec_names[dss_prec_limit];
450 	astats->pactive = 0;
451 	astats->pdirty = 0;
452 	if (config_stats) {
453 		memset(&astats->astats, 0, sizeof(arena_stats_t));
454 		astats->allocated_small = 0;
455 		astats->nmalloc_small = 0;
456 		astats->ndalloc_small = 0;
457 		astats->nrequests_small = 0;
458 		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
459 		memset(astats->lstats, 0, nlclasses *
460 		    sizeof(malloc_large_stats_t));
461 	}
462 }
463 
464 static void
465 ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
466 {
467 	unsigned i;
468 
469 	arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
470 	    &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
471 
472 	for (i = 0; i < NBINS; i++) {
473 		cstats->allocated_small += cstats->bstats[i].allocated;
474 		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
475 		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
476 		cstats->nrequests_small += cstats->bstats[i].nrequests;
477 	}
478 }
479 
480 static void
481 ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
482 {
483 	unsigned i;
484 
485 	sstats->pactive += astats->pactive;
486 	sstats->pdirty += astats->pdirty;
487 
488 	sstats->astats.mapped += astats->astats.mapped;
489 	sstats->astats.npurge += astats->astats.npurge;
490 	sstats->astats.nmadvise += astats->astats.nmadvise;
491 	sstats->astats.purged += astats->astats.purged;
492 
493 	sstats->allocated_small += astats->allocated_small;
494 	sstats->nmalloc_small += astats->nmalloc_small;
495 	sstats->ndalloc_small += astats->ndalloc_small;
496 	sstats->nrequests_small += astats->nrequests_small;
497 
498 	sstats->astats.allocated_large += astats->astats.allocated_large;
499 	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
500 	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
501 	sstats->astats.nrequests_large += astats->astats.nrequests_large;
502 
503 	sstats->astats.allocated_huge += astats->astats.allocated_huge;
504 	sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
505 	sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
506 	sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
507 
508 	for (i = 0; i < nlclasses; i++) {
509 		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
510 		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
511 		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
512 		sstats->lstats[i].curruns += astats->lstats[i].curruns;
513 	}
514 
515 	for (i = 0; i < NBINS; i++) {
516 		sstats->bstats[i].allocated += astats->bstats[i].allocated;
517 		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
518 		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
519 		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
520 		if (config_tcache) {
521 			sstats->bstats[i].nfills += astats->bstats[i].nfills;
522 			sstats->bstats[i].nflushes +=
523 			    astats->bstats[i].nflushes;
524 		}
525 		sstats->bstats[i].nruns += astats->bstats[i].nruns;
526 		sstats->bstats[i].reruns += astats->bstats[i].reruns;
527 		sstats->bstats[i].curruns += astats->bstats[i].curruns;
528 	}
529 }
530 
531 static void
532 ctl_arena_refresh(arena_t *arena, unsigned i)
533 {
534 	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
535 	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
536 
537 	ctl_arena_clear(astats);
538 
539 	sstats->nthreads += astats->nthreads;
540 	if (config_stats) {
541 		ctl_arena_stats_amerge(astats, arena);
542 		/* Merge into sum stats as well. */
543 		ctl_arena_stats_smerge(sstats, astats);
544 	} else {
545 		astats->pactive += arena->nactive;
546 		astats->pdirty += arena->ndirty;
547 		/* Merge into sum stats as well. */
548 		sstats->pactive += arena->nactive;
549 		sstats->pdirty += arena->ndirty;
550 	}
551 }
552 
553 static bool
554 ctl_grow(void)
555 {
556 	ctl_arena_stats_t *astats;
557 	arena_t **tarenas;
558 
559 	/* Allocate extended arena stats and arenas arrays. */
560 	astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
561 	    sizeof(ctl_arena_stats_t));
562 	if (astats == NULL)
563 		return (true);
564 	tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
565 	    sizeof(arena_t *));
566 	if (tarenas == NULL) {
567 		idalloc(astats);
568 		return (true);
569 	}
570 
571 	/* Initialize the new astats element. */
572 	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
573 	    sizeof(ctl_arena_stats_t));
574 	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
575 	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
576 		idalloc(tarenas);
577 		idalloc(astats);
578 		return (true);
579 	}
580 	/* Swap merged stats to their new location. */
581 	{
582 		ctl_arena_stats_t tstats;
583 		memcpy(&tstats, &astats[ctl_stats.narenas],
584 		    sizeof(ctl_arena_stats_t));
585 		memcpy(&astats[ctl_stats.narenas],
586 		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
587 		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
588 		    sizeof(ctl_arena_stats_t));
589 	}
590 	/* Initialize the new arenas element. */
591 	tarenas[ctl_stats.narenas] = NULL;
592 	{
593 		arena_t **arenas_old = arenas;
594 		/*
595 		 * Swap extended arenas array into place.  Although ctl_mtx
596 		 * protects this function from other threads extending the
597 		 * array, it does not protect from other threads mutating it
598 		 * (i.e. initializing arenas and setting array elements to
599 		 * point to them).  Therefore, array copying must happen under
600 		 * the protection of arenas_lock.
601 		 */
602 		malloc_mutex_lock(&arenas_lock);
603 		arenas = tarenas;
604 		memcpy(arenas, arenas_old, ctl_stats.narenas *
605 		    sizeof(arena_t *));
606 		narenas_total++;
607 		arenas_extend(narenas_total - 1);
608 		malloc_mutex_unlock(&arenas_lock);
609 		/*
610 		 * Deallocate arenas_old only if it came from imalloc() (not
611 		 * base_alloc()).
612 		 */
613 		if (ctl_stats.narenas != narenas_auto)
614 			idalloc(arenas_old);
615 	}
616 	ctl_stats.arenas = astats;
617 	ctl_stats.narenas++;
618 
619 	return (false);
620 }
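
/*
 * Usage sketch (illustrative, not part of this file): ctl_grow() backs the
 * "arenas.extend" control, which applications reach through the public
 * mallctl() wrapper (assuming an unprefixed build of jemalloc):
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	int err = mallctl("arenas.extend", &ind, &sz, NULL, 0);
 *
 * On success (err == 0), ind holds the index of the newly created arena.
 */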
621 
622 static void
623 ctl_refresh(void)
624 {
625 	unsigned i;
626 	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
627 
628 	if (config_stats) {
629 		malloc_mutex_lock(&chunks_mtx);
630 		ctl_stats.chunks.current = stats_chunks.curchunks;
631 		ctl_stats.chunks.total = stats_chunks.nchunks;
632 		ctl_stats.chunks.high = stats_chunks.highchunks;
633 		malloc_mutex_unlock(&chunks_mtx);
634 	}
635 
636 	/*
637 	 * Clear sum stats, since ctl_arena_refresh() merges each arena's
638 	 * stats into them.
639 	 */
640 	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
641 	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
642 
643 	malloc_mutex_lock(&arenas_lock);
644 	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
645 	for (i = 0; i < ctl_stats.narenas; i++) {
646 		if (arenas[i] != NULL)
647 			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
648 		else
649 			ctl_stats.arenas[i].nthreads = 0;
650 	}
651 	malloc_mutex_unlock(&arenas_lock);
652 	for (i = 0; i < ctl_stats.narenas; i++) {
653 		bool initialized = (tarenas[i] != NULL);
654 
655 		ctl_stats.arenas[i].initialized = initialized;
656 		if (initialized)
657 			ctl_arena_refresh(tarenas[i], i);
658 	}
659 
660 	if (config_stats) {
661 		ctl_stats.allocated =
662 		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
663 		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
664 		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
665 		ctl_stats.active =
666 		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
667 		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
668 	}
669 
670 	ctl_epoch++;
671 }
672 
673 static bool
674 ctl_init(void)
675 {
676 	bool ret;
677 
678 	malloc_mutex_lock(&ctl_mtx);
679 	if (ctl_initialized == false) {
680 		/*
681 		 * Allocate space for one extra arena stats element, which
682 		 * contains summed stats across all arenas.
683 		 */
684 		assert(narenas_auto == narenas_total_get());
685 		ctl_stats.narenas = narenas_auto;
686 		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
687 		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
688 		if (ctl_stats.arenas == NULL) {
689 			ret = true;
690 			goto label_return;
691 		}
692 		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
693 		    sizeof(ctl_arena_stats_t));
694 
695 		/*
696 		 * Initialize all stats structures, regardless of whether they
697 		 * ever get used.  Lazy initialization would allow errors to
698 		 * cause inconsistent state to be viewable by the application.
699 		 */
700 		if (config_stats) {
701 			unsigned i;
702 			for (i = 0; i <= ctl_stats.narenas; i++) {
703 				if (ctl_arena_init(&ctl_stats.arenas[i])) {
704 					ret = true;
705 					goto label_return;
706 				}
707 			}
708 		}
709 		ctl_stats.arenas[ctl_stats.narenas].initialized = true;
710 
711 		ctl_epoch = 0;
712 		ctl_refresh();
713 		ctl_initialized = true;
714 	}
715 
716 	ret = false;
717 label_return:
718 	malloc_mutex_unlock(&ctl_mtx);
719 	return (ret);
720 }
721 
722 static int
723 ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
724     size_t *depthp)
725 {
726 	int ret;
727 	const char *elm, *tdot, *dot;
728 	size_t elen, i, j;
729 	const ctl_named_node_t *node;
730 
731 	elm = name;
732 	/* Equivalent to strchrnul(). */
733 	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
734 	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
735 	if (elen == 0) {
736 		ret = ENOENT;
737 		goto label_return;
738 	}
739 	node = super_root_node;
740 	for (i = 0; i < *depthp; i++) {
741 		assert(node);
742 		assert(node->nchildren > 0);
743 		if (ctl_named_node(node->children) != NULL) {
744 			const ctl_named_node_t *pnode = node;
745 
746 			/* Children are named. */
747 			for (j = 0; j < node->nchildren; j++) {
748 				const ctl_named_node_t *child =
749 				    ctl_named_children(node, j);
750 				if (strlen(child->name) == elen &&
751 				    strncmp(elm, child->name, elen) == 0) {
752 					node = child;
753 					if (nodesp != NULL)
754 						nodesp[i] =
755 						    (const ctl_node_t *)node;
756 					mibp[i] = j;
757 					break;
758 				}
759 			}
760 			if (node == pnode) {
761 				ret = ENOENT;
762 				goto label_return;
763 			}
764 		} else {
765 			uintmax_t index;
766 			const ctl_indexed_node_t *inode;
767 
768 			/* Children are indexed. */
769 			index = malloc_strtoumax(elm, NULL, 10);
770 			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
771 				ret = ENOENT;
772 				goto label_return;
773 			}
774 
775 			inode = ctl_indexed_node(node->children);
776 			node = inode->index(mibp, *depthp, (size_t)index);
777 			if (node == NULL) {
778 				ret = ENOENT;
779 				goto label_return;
780 			}
781 
782 			if (nodesp != NULL)
783 				nodesp[i] = (const ctl_node_t *)node;
784 			mibp[i] = (size_t)index;
785 		}
786 
787 		if (node->ctl != NULL) {
788 			/* Terminal node. */
789 			if (*dot != '\0') {
790 				/*
791 				 * The name contains more elements than are
792 				 * in this path through the tree.
793 				 */
794 				ret = ENOENT;
795 				goto label_return;
796 			}
797 			/* Complete lookup successful. */
798 			*depthp = i + 1;
799 			break;
800 		}
801 
802 		/* Update elm. */
803 		if (*dot == '\0') {
804 			/* No more elements. */
805 			ret = ENOENT;
806 			goto label_return;
807 		}
808 		elm = &dot[1];
809 		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
810 		    strchr(elm, '\0');
811 		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
812 	}
813 
814 	ret = 0;
815 label_return:
816 	return (ret);
817 }
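
/*
 * Usage sketch (illustrative, not part of this file): ctl_lookup() services
 * the public mallctlnametomib()/mallctlbymib() pair, which let callers
 * translate a name once and reuse the resulting MIB (assuming an unprefixed
 * build of jemalloc; the array size 6 matches CTL_MAX_DEPTH above):
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	size_t mib[6];
 *	size_t miblen = 6;
 *	size_t allocated, sz = sizeof(allocated);
 *	mallctlnametomib("stats.arenas.0.small.allocated", mib, &miblen);
 *	mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
 *
 * Overwriting mib[2] before the mallctlbymib() call reads the same statistic
 * for a different arena without repeating the name lookup.
 */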
818 
819 int
820 ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
821     size_t newlen)
822 {
823 	int ret;
824 	size_t depth;
825 	ctl_node_t const *nodes[CTL_MAX_DEPTH];
826 	size_t mib[CTL_MAX_DEPTH];
827 	const ctl_named_node_t *node;
828 
829 	if (ctl_initialized == false && ctl_init()) {
830 		ret = EAGAIN;
831 		goto label_return;
832 	}
833 
834 	depth = CTL_MAX_DEPTH;
835 	ret = ctl_lookup(name, nodes, mib, &depth);
836 	if (ret != 0)
837 		goto label_return;
838 
839 	node = ctl_named_node(nodes[depth-1]);
840 	if (node != NULL && node->ctl)
841 		ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
842 	else {
843 		/* The name refers to a partial path through the ctl tree. */
844 		ret = ENOENT;
845 	}
846 
847 label_return:
848 	return(ret);
849 }
850 
851 int
852 ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
853 {
854 	int ret;
855 
856 	if (ctl_initialized == false && ctl_init()) {
857 		ret = EAGAIN;
858 		goto label_return;
859 	}
860 
861 	ret = ctl_lookup(name, NULL, mibp, miblenp);
862 label_return:
863 	return(ret);
864 }
865 
866 int
867 ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
868     void *newp, size_t newlen)
869 {
870 	int ret;
871 	const ctl_named_node_t *node;
872 	size_t i;
873 
874 	if (ctl_initialized == false && ctl_init()) {
875 		ret = EAGAIN;
876 		goto label_return;
877 	}
878 
879 	/* Iterate down the tree. */
880 	node = super_root_node;
881 	for (i = 0; i < miblen; i++) {
882 		assert(node);
883 		assert(node->nchildren > 0);
884 		if (ctl_named_node(node->children) != NULL) {
885 			/* Children are named. */
886 			if (node->nchildren <= mib[i]) {
887 				ret = ENOENT;
888 				goto label_return;
889 			}
890 			node = ctl_named_children(node, mib[i]);
891 		} else {
892 			const ctl_indexed_node_t *inode;
893 
894 			/* Indexed element. */
895 			inode = ctl_indexed_node(node->children);
896 			node = inode->index(mib, miblen, mib[i]);
897 			if (node == NULL) {
898 				ret = ENOENT;
899 				goto label_return;
900 			}
901 		}
902 	}
903 
904 	/* Call the ctl function. */
905 	if (node && node->ctl)
906 		ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
907 	else {
908 		/* Partial MIB. */
909 		ret = ENOENT;
910 	}
911 
912 label_return:
913 	return(ret);
914 }
915 
916 bool
917 ctl_boot(void)
918 {
919 
920 	if (malloc_mutex_init(&ctl_mtx))
921 		return (true);
922 
923 	ctl_initialized = false;
924 
925 	return (false);
926 }
927 
928 void
929 ctl_prefork(void)
930 {
931 
932 	malloc_mutex_prefork(&ctl_mtx);
933 }
934 
935 void
936 ctl_postfork_parent(void)
937 {
938 
939 	malloc_mutex_postfork_parent(&ctl_mtx);
940 }
941 
942 void
943 ctl_postfork_child(void)
944 {
945 
946 	malloc_mutex_postfork_child(&ctl_mtx);
947 }
948 
949 /******************************************************************************/
950 /* *_ctl() functions. */
951 
952 #define	READONLY()	do {						\
953 	if (newp != NULL || newlen != 0) {				\
954 		ret = EPERM;						\
955 		goto label_return;					\
956 	}								\
957 } while (0)
958 
959 #define	WRITEONLY()	do {						\
960 	if (oldp != NULL || oldlenp != NULL) {				\
961 		ret = EPERM;						\
962 		goto label_return;					\
963 	}								\
964 } while (0)
965 
966 #define	READ(v, t)	do {						\
967 	if (oldp != NULL && oldlenp != NULL) {				\
968 		if (*oldlenp != sizeof(t)) {				\
969 			size_t	copylen = (sizeof(t) <= *oldlenp)	\
970 			    ? sizeof(t) : *oldlenp;			\
971 			memcpy(oldp, (void *)&(v), copylen);		\
972 			ret = EINVAL;					\
973 			goto label_return;				\
974 		} else							\
975 			*(t *)oldp = (v);				\
976 	}								\
977 } while (0)
978 
979 #define	WRITE(v, t)	do {						\
980 	if (newp != NULL) {						\
981 		if (newlen != sizeof(t)) {				\
982 			ret = EINVAL;					\
983 			goto label_return;				\
984 		}							\
985 		(v) = *(t *)newp;					\
986 	}								\
987 } while (0)
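
/*
 * Caller-side sketch (illustrative, not part of this file): the READ()/
 * WRITE() macros above implement the oldp/oldlenp/newp/newlen protocol that
 * mallctl() callers see.  A single call may both read the old value and
 * write a new one, e.g. for the read/write "thread.arena" control (assuming
 * an unprefixed build of jemalloc):
 *
 *	unsigned old_ind, new_ind = 1;
 *	size_t sz = sizeof(old_ind);
 *	mallctl("thread.arena", &old_ind, &sz, &new_ind, sizeof(new_ind));
 *
 * A mismatched *oldlenp yields EINVAL after copying as many bytes as fit; a
 * mismatched newlen yields EINVAL with nothing written.
 */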
988 
989 /*
990  * There's a lot of code duplication in the following macros due to limitations
991  * in how nested cpp macros are expanded.
992  */
993 #define	CTL_RO_CLGEN(c, l, n, v, t)					\
994 static int								\
995 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
996     void *newp, size_t newlen)						\
997 {									\
998 	int ret;							\
999 	t oldval;							\
1000 									\
1001 	if ((c) == false)						\
1002 		return (ENOENT);					\
1003 	if (l)								\
1004 		malloc_mutex_lock(&ctl_mtx);				\
1005 	READONLY();							\
1006 	oldval = (v);							\
1007 	READ(oldval, t);						\
1008 									\
1009 	ret = 0;							\
1010 label_return:								\
1011 	if (l)								\
1012 		malloc_mutex_unlock(&ctl_mtx);				\
1013 	return (ret);							\
1014 }
1015 
1016 #define	CTL_RO_CGEN(c, n, v, t)						\
1017 static int								\
1018 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
1019     void *newp, size_t newlen)						\
1020 {									\
1021 	int ret;							\
1022 	t oldval;							\
1023 									\
1024 	if ((c) == false)						\
1025 		return (ENOENT);					\
1026 	malloc_mutex_lock(&ctl_mtx);					\
1027 	READONLY();							\
1028 	oldval = (v);							\
1029 	READ(oldval, t);						\
1030 									\
1031 	ret = 0;							\
1032 label_return:								\
1033 	malloc_mutex_unlock(&ctl_mtx);					\
1034 	return (ret);							\
1035 }
1036 
1037 #define	CTL_RO_GEN(n, v, t)						\
1038 static int								\
1039 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
1040     void *newp, size_t newlen)						\
1041 {									\
1042 	int ret;							\
1043 	t oldval;							\
1044 									\
1045 	malloc_mutex_lock(&ctl_mtx);					\
1046 	READONLY();							\
1047 	oldval = (v);							\
1048 	READ(oldval, t);						\
1049 									\
1050 	ret = 0;							\
1051 label_return:								\
1052 	malloc_mutex_unlock(&ctl_mtx);					\
1053 	return (ret);							\
1054 }
1055 
1056 /*
1057  * ctl_mtx is not acquired, under the assumption that no pertinent data will
1058  * mutate during the call.
1059  */
1060 #define	CTL_RO_NL_CGEN(c, n, v, t)					\
1061 static int								\
1062 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
1063     void *newp, size_t newlen)						\
1064 {									\
1065 	int ret;							\
1066 	t oldval;							\
1067 									\
1068 	if ((c) == false)						\
1069 		return (ENOENT);					\
1070 	READONLY();							\
1071 	oldval = (v);							\
1072 	READ(oldval, t);						\
1073 									\
1074 	ret = 0;							\
1075 label_return:								\
1076 	return (ret);							\
1077 }
1078 
1079 #define	CTL_RO_NL_GEN(n, v, t)						\
1080 static int								\
1081 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
1082     void *newp, size_t newlen)						\
1083 {									\
1084 	int ret;							\
1085 	t oldval;							\
1086 									\
1087 	READONLY();							\
1088 	oldval = (v);							\
1089 	READ(oldval, t);						\
1090 									\
1091 	ret = 0;							\
1092 label_return:								\
1093 	return (ret);							\
1094 }
1095 
1096 #define	CTL_RO_BOOL_CONFIG_GEN(n)					\
1097 static int								\
1098 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
1099     void *newp, size_t newlen)						\
1100 {									\
1101 	int ret;							\
1102 	bool oldval;							\
1103 									\
1104 	READONLY();							\
1105 	oldval = n;							\
1106 	READ(oldval, bool);						\
1107 									\
1108 	ret = 0;							\
1109 label_return:								\
1110 	return (ret);							\
1111 }
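
/*
 * Illustrative expansion (not part of the original source): each *_GEN macro
 * above stamps out one *_ctl() handler.  For example,
 * CTL_RO_NL_GEN(opt_abort, opt_abort, bool) used below generates roughly
 *
 *	static int
 *	opt_abort_ctl(const size_t *mib, size_t miblen, void *oldp,
 *	    size_t *oldlenp, void *newp, size_t newlen)
 *	{
 *		int ret;
 *		bool oldval;
 *
 *		READONLY();
 *		oldval = opt_abort;
 *		READ(oldval, bool);
 *
 *		ret = 0;
 *	label_return:
 *		return (ret);
 *	}
 */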
1112 
1113 /******************************************************************************/
1114 
1115 CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
1116 
1117 static int
1118 epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1119     void *newp, size_t newlen)
1120 {
1121 	int ret;
1122 	UNUSED uint64_t newval;
1123 
1124 	malloc_mutex_lock(&ctl_mtx);
1125 	WRITE(newval, uint64_t);
1126 	if (newp != NULL)
1127 		ctl_refresh();
1128 	READ(ctl_epoch, uint64_t);
1129 
1130 	ret = 0;
1131 label_return:
1132 	malloc_mutex_unlock(&ctl_mtx);
1133 	return (ret);
1134 }
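
/*
 * Usage sketch (illustrative, not part of this file): statistics are cached
 * in ctl_stats and only refreshed by ctl_refresh(), so applications advance
 * the epoch before reading them (assuming an unprefixed build of jemalloc):
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	uint64_t epoch = 1;
 *	size_t allocated;
 *	size_t sz = sizeof(allocated);
 *	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */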
1135 
1136 /******************************************************************************/
1137 
1138 CTL_RO_BOOL_CONFIG_GEN(config_debug)
1139 CTL_RO_BOOL_CONFIG_GEN(config_fill)
1140 CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
1141 CTL_RO_BOOL_CONFIG_GEN(config_munmap)
1142 CTL_RO_BOOL_CONFIG_GEN(config_prof)
1143 CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
1144 CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
1145 CTL_RO_BOOL_CONFIG_GEN(config_stats)
1146 CTL_RO_BOOL_CONFIG_GEN(config_tcache)
1147 CTL_RO_BOOL_CONFIG_GEN(config_tls)
1148 CTL_RO_BOOL_CONFIG_GEN(config_utrace)
1149 CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
1150 CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
1151 
1152 /******************************************************************************/
1153 
1154 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
1155 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
1156 CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
1157 CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
1158 CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
1159 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
1160 CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
1161 CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
1162 CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
1163 CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
1164 CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
1165 CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
1166 CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
1167 CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
1168 CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
1169 CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
1170 CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
1171 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
1172 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
1173 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
1174 CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
1175 CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
1176 CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
1177 
1178 /******************************************************************************/
1179 
1180 static int
1181 thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1182     void *newp, size_t newlen)
1183 {
1184 	int ret;
1185 	unsigned newind, oldind;
1186 
1187 	malloc_mutex_lock(&ctl_mtx);
1188 	newind = oldind = choose_arena(NULL)->ind;
1189 	WRITE(newind, unsigned);
1190 	READ(oldind, unsigned);
1191 	if (newind != oldind) {
1192 		arena_t *arena;
1193 
1194 		if (newind >= ctl_stats.narenas) {
1195 			/* New arena index is out of range. */
1196 			ret = EFAULT;
1197 			goto label_return;
1198 		}
1199 
1200 		/* Initialize arena if necessary. */
1201 		malloc_mutex_lock(&arenas_lock);
1202 		if ((arena = arenas[newind]) == NULL && (arena =
1203 		    arenas_extend(newind)) == NULL) {
1204 			malloc_mutex_unlock(&arenas_lock);
1205 			ret = EAGAIN;
1206 			goto label_return;
1207 		}
1208 		assert(arena == arenas[newind]);
1209 		arenas[oldind]->nthreads--;
1210 		arenas[newind]->nthreads++;
1211 		malloc_mutex_unlock(&arenas_lock);
1212 
1213 		/* Set new arena association. */
1214 		if (config_tcache) {
1215 			tcache_t *tcache;
1216 			if ((uintptr_t)(tcache = *tcache_tsd_get()) >
1217 			    (uintptr_t)TCACHE_STATE_MAX) {
1218 				tcache_arena_dissociate(tcache);
1219 				tcache_arena_associate(tcache, arena);
1220 			}
1221 		}
1222 		arenas_tsd_set(&arena);
1223 	}
1224 
1225 	ret = 0;
1226 label_return:
1227 	malloc_mutex_unlock(&ctl_mtx);
1228 	return (ret);
1229 }
1230 
1231 CTL_RO_NL_CGEN(config_stats, thread_allocated,
1232     thread_allocated_tsd_get()->allocated, uint64_t)
1233 CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
1234     &thread_allocated_tsd_get()->allocated, uint64_t *)
1235 CTL_RO_NL_CGEN(config_stats, thread_deallocated,
1236     thread_allocated_tsd_get()->deallocated, uint64_t)
1237 CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
1238     &thread_allocated_tsd_get()->deallocated, uint64_t *)
1239 
1240 static int
1241 thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
1242     size_t *oldlenp, void *newp, size_t newlen)
1243 {
1244 	int ret;
1245 	bool oldval;
1246 
1247 	if (config_tcache == false)
1248 		return (ENOENT);
1249 
1250 	oldval = tcache_enabled_get();
1251 	if (newp != NULL) {
1252 		if (newlen != sizeof(bool)) {
1253 			ret = EINVAL;
1254 			goto label_return;
1255 		}
1256 		tcache_enabled_set(*(bool *)newp);
1257 	}
1258 	READ(oldval, bool);
1259 
1260 	ret = 0;
1261 label_return:
1262 	return (ret);
1263 }
1264 
1265 static int
1266 thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
1267     size_t *oldlenp, void *newp, size_t newlen)
1268 {
1269 	int ret;
1270 
1271 	if (config_tcache == false)
1272 		return (ENOENT);
1273 
1274 	READONLY();
1275 	WRITEONLY();
1276 
1277 	tcache_flush();
1278 
1279 	ret = 0;
1280 label_return:
1281 	return (ret);
1282 }
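
/*
 * Usage sketch (illustrative, not part of this file): the two handlers above
 * back "thread.tcache.enabled" and "thread.tcache.flush" (assuming an
 * unprefixed build of jemalloc with tcache support compiled in):
 *
 *	bool enabled = false;
 *	mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
 *	    sizeof(enabled));
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 *
 * The first call disables the calling thread's tcache; the second flushes
 * its cached objects back to the arena.
 */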
1283 
1284 /******************************************************************************/
1285 
1286 /* ctl_mtx must be held during execution of this function. */
1287 static void
1288 arena_purge(unsigned arena_ind)
1289 {
1290 	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
1291 
1292 	malloc_mutex_lock(&arenas_lock);
1293 	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
1294 	malloc_mutex_unlock(&arenas_lock);
1295 
1296 	if (arena_ind == ctl_stats.narenas) {
1297 		unsigned i;
1298 		for (i = 0; i < ctl_stats.narenas; i++) {
1299 			if (tarenas[i] != NULL)
1300 				arena_purge_all(tarenas[i]);
1301 		}
1302 	} else {
1303 		assert(arena_ind < ctl_stats.narenas);
1304 		if (tarenas[arena_ind] != NULL)
1305 			arena_purge_all(tarenas[arena_ind]);
1306 	}
1307 }
1308 
1309 static int
1310 arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1311     void *newp, size_t newlen)
1312 {
1313 	int ret;
1314 
1315 	READONLY();
1316 	WRITEONLY();
1317 	malloc_mutex_lock(&ctl_mtx);
1318 	arena_purge(mib[1]);
1319 	malloc_mutex_unlock(&ctl_mtx);
1320 
1321 	ret = 0;
1322 label_return:
1323 	return (ret);
1324 }
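
/*
 * Usage sketch (illustrative, not part of this file): "arena.<i>.purge"
 * takes neither input nor output, so it is invoked with all-NULL arguments
 * (assuming an unprefixed build of jemalloc):
 *
 *	mallctl("arena.0.purge", NULL, NULL, NULL, 0);
 *
 * Passing the value of "arenas.narenas" as <i> purges every initialized
 * arena, which is the arena_ind == ctl_stats.narenas case handled above.
 */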
1325 
1326 static int
1327 arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1328     void *newp, size_t newlen)
1329 {
1330 	int ret, i;
1331 	bool match, err;
1332 	const char *dss;
1333 	unsigned arena_ind = mib[1];
1334 	dss_prec_t dss_prec_old = dss_prec_limit;
1335 	dss_prec_t dss_prec = dss_prec_limit;
1336 
1337 	malloc_mutex_lock(&ctl_mtx);
1338 	WRITE(dss, const char *);
1339 	match = false;
1340 	for (i = 0; i < dss_prec_limit; i++) {
1341 		if (strcmp(dss_prec_names[i], dss) == 0) {
1342 			dss_prec = i;
1343 			match = true;
1344 			break;
1345 		}
1346 	}
1347 	if (match == false) {
1348 		ret = EINVAL;
1349 		goto label_return;
1350 	}
1351 
1352 	if (arena_ind < ctl_stats.narenas) {
1353 		arena_t *arena = arenas[arena_ind];
1354 		if (arena != NULL) {
1355 			dss_prec_old = arena_dss_prec_get(arena);
1356 			err = arena_dss_prec_set(arena, dss_prec);
1357 		} else
1358 			err = true;
1359 	} else {
1360 		dss_prec_old = chunk_dss_prec_get();
1361 		err = chunk_dss_prec_set(dss_prec);
1362 	}
1363 	dss = dss_prec_names[dss_prec_old];
1364 	READ(dss, const char *);
1365 	if (err) {
1366 		ret = EFAULT;
1367 		goto label_return;
1368 	}
1369 
1370 	ret = 0;
1371 label_return:
1372 	malloc_mutex_unlock(&ctl_mtx);
1373 	return (ret);
1374 }
1375 
1376 static int
1377 arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp,
1378     size_t *oldlenp, void *newp, size_t newlen)
1379 {
1380 	int ret;
1381 	unsigned arena_ind = mib[1];
1382 	arena_t *arena;
1383 
1384 	malloc_mutex_lock(&ctl_mtx);
1385 	if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
1386 		malloc_mutex_lock(&arena->lock);
1387 		READ(arena->chunk_alloc, chunk_alloc_t *);
1388 		WRITE(arena->chunk_alloc, chunk_alloc_t *);
1389 	} else {
1390 		ret = EFAULT;
1391 		goto label_outer_return;
1392 	}
1393 	ret = 0;
1394 label_return:
1395 	malloc_mutex_unlock(&arena->lock);
1396 label_outer_return:
1397 	malloc_mutex_unlock(&ctl_mtx);
1398 	return (ret);
1399 }
1400 
1401 static int
1402 arena_i_chunk_dalloc_ctl(const size_t *mib, size_t miblen, void *oldp,
1403     size_t *oldlenp, void *newp, size_t newlen)
1404 {
1405 
1406 	int ret;
1407 	unsigned arena_ind = mib[1];
1408 	arena_t *arena;
1409 
1410 	malloc_mutex_lock(&ctl_mtx);
1411 	if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
1412 		malloc_mutex_lock(&arena->lock);
1413 		READ(arena->chunk_dalloc, chunk_dalloc_t *);
1414 		WRITE(arena->chunk_dalloc, chunk_dalloc_t *);
1415 	} else {
1416 		ret = EFAULT;
1417 		goto label_outer_return;
1418 	}
1419 	ret = 0;
1420 label_return:
1421 	malloc_mutex_unlock(&arena->lock);
1422 label_outer_return:
1423 	malloc_mutex_unlock(&ctl_mtx);
1424 	return (ret);
1425 }
1426 
1427 static const ctl_named_node_t *
1428 arena_i_index(const size_t *mib, size_t miblen, size_t i)
1429 {
1430 	const ctl_named_node_t * ret;
1431 
1432 	malloc_mutex_lock(&ctl_mtx);
1433 	if (i > ctl_stats.narenas) {
1434 		ret = NULL;
1435 		goto label_return;
1436 	}
1437 
1438 	ret = super_arena_i_node;
1439 label_return:
1440 	malloc_mutex_unlock(&ctl_mtx);
1441 	return (ret);
1442 }
1443 
1444 /******************************************************************************/
1445 
1446 static int
1447 arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
1448     size_t *oldlenp, void *newp, size_t newlen)
1449 {
1450 	int ret;
1451 	unsigned narenas;
1452 
1453 	malloc_mutex_lock(&ctl_mtx);
1454 	READONLY();
1455 	if (*oldlenp != sizeof(unsigned)) {
1456 		ret = EINVAL;
1457 		goto label_return;
1458 	}
1459 	narenas = ctl_stats.narenas;
1460 	READ(narenas, unsigned);
1461 
1462 	ret = 0;
1463 label_return:
1464 	malloc_mutex_unlock(&ctl_mtx);
1465 	return (ret);
1466 }
1467 
1468 static int
1469 arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
1470     size_t *oldlenp, void *newp, size_t newlen)
1471 {
1472 	int ret;
1473 	unsigned nread, i;
1474 
1475 	malloc_mutex_lock(&ctl_mtx);
1476 	READONLY();
1477 	if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
1478 		ret = EINVAL;
1479 		nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
1480 		    ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
1481 	} else {
1482 		ret = 0;
1483 		nread = ctl_stats.narenas;
1484 	}
1485 
1486 	for (i = 0; i < nread; i++)
1487 		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
1488 
1489 label_return:
1490 	malloc_mutex_unlock(&ctl_mtx);
1491 	return (ret);
1492 }
1493 
1494 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
1495 CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
1496 CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
1497 CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
1498 CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
1499 CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
1500 CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
1501 CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
1502 static const ctl_named_node_t *
1503 arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
1504 {
1505 
1506 	if (i > NBINS)
1507 		return (NULL);
1508 	return (super_arenas_bin_i_node);
1509 }
1510 
1511 CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
1512 CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
1513 static const ctl_named_node_t *
1514 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
1515 {
1516 
1517 	if (i > nlclasses)
1518 		return (NULL);
1519 	return (super_arenas_lrun_i_node);
1520 }
1521 
1522 static int
1523 arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1524     void *newp, size_t newlen)
1525 {
1526 	int ret;
1527 	unsigned narenas;
1528 
1529 	malloc_mutex_lock(&ctl_mtx);
1530 	READONLY();
1531 	if (ctl_grow()) {
1532 		ret = EAGAIN;
1533 		goto label_return;
1534 	}
1535 	narenas = ctl_stats.narenas - 1;
1536 	READ(narenas, unsigned);
1537 
1538 	ret = 0;
1539 label_return:
1540 	malloc_mutex_unlock(&ctl_mtx);
1541 	return (ret);
1542 }
1543 
1544 /******************************************************************************/
1545 
1546 static int
1547 prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1548     void *newp, size_t newlen)
1549 {
1550 	int ret;
1551 	bool oldval;
1552 
1553 	if (config_prof == false)
1554 		return (ENOENT);
1555 
1556 	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
1557 	oldval = opt_prof_active;
1558 	if (newp != NULL) {
1559 		/*
1560 		 * The memory barriers will tend to make opt_prof_active
1561 		 * propagate faster on systems with weak memory ordering.
1562 		 */
1563 		mb_write();
1564 		WRITE(opt_prof_active, bool);
1565 		mb_write();
1566 	}
1567 	READ(oldval, bool);
1568 
1569 	ret = 0;
1570 label_return:
1571 	malloc_mutex_unlock(&ctl_mtx);
1572 	return (ret);
1573 }
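
/*
 * Usage sketch (illustrative, not part of this file): "prof.active" lets an
 * application toggle profiling sample collection at runtime when jemalloc is
 * built with --enable-prof (assuming an unprefixed build):
 *
 *	bool active = false;
 *	mallctl("prof.active", NULL, NULL, &active, sizeof(active));
 */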
1574 
1575 static int
1576 prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1577     void *newp, size_t newlen)
1578 {
1579 	int ret;
1580 	const char *filename = NULL;
1581 
1582 	if (config_prof == false)
1583 		return (ENOENT);
1584 
1585 	WRITEONLY();
1586 	WRITE(filename, const char *);
1587 
1588 	if (prof_mdump(filename)) {
1589 		ret = EFAULT;
1590 		goto label_return;
1591 	}
1592 
1593 	ret = 0;
1594 label_return:
1595 	return (ret);
1596 }
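
/*
 * Usage sketch (illustrative, not part of this file): "prof.dump" is
 * write-only; writing a filename dumps a heap profile to that path, and
 * passing newp == NULL dumps to a name derived from opt_prof_prefix
 * (assuming an unprefixed build with --enable-prof):
 *
 *	const char *filename = "profile.heap";
 *	mallctl("prof.dump", NULL, NULL, &filename, sizeof(filename));
 */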
1597 
1598 CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
1599 
1600 /******************************************************************************/
1601 
1602 CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
1603 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
1604 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
1605 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
1606 
1607 CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
1608     size_t)
1609 CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
1610 CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
1611 
1612 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
1613 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
1614 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
1615 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
1616 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
1617     ctl_stats.arenas[mib[2]].astats.mapped, size_t)
1618 CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
1619     ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
1620 CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
1621     ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
1622 CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
1623     ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
1624 
1625 CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
1626     ctl_stats.arenas[mib[2]].allocated_small, size_t)
1627 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
1628     ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
1629 CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
1630     ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
1631 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
1632     ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
1633 CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
1634     ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
1635 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
1636     ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
1637 CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
1638     ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
1639 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
1640     ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
1641 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
1642     ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
1643 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
1644     ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
1645 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
1646     ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
1647 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
1648     ctl_stats.arenas[mib[2]].astats.nrequests_huge, uint64_t)
1649 
1650 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
1651     ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
1652 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
1653     ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
1654 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
1655     ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
1656 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
1657     ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
1658 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
1659     ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
1660 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
1661     ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
1662 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
1663     ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
1664 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
1665     ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
1666 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
1667     ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
1668 
1669 static const ctl_named_node_t *
1670 stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
1671 {
1672 
1673 	if (j > NBINS)
1674 		return (NULL);
1675 	return (super_stats_arenas_i_bins_j_node);
1676 }
1677 
1678 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
1679     ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
1680 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
1681     ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
1682 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
1683     ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
1684 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
1685     ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
1686 
1687 static const ctl_named_node_t *
1688 stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
1689 {
1690 
1691 	if (j > nlclasses)
1692 		return (NULL);
1693 	return (super_stats_arenas_i_lruns_j_node);
1694 }
1695 
1696 static const ctl_named_node_t *
1697 stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
1698 {
1699 	const ctl_named_node_t * ret;
1700 
1701 	malloc_mutex_lock(&ctl_mtx);
1702 	if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
1703 		ret = NULL;
1704 		goto label_return;
1705 	}
1706 
1707 	ret = super_stats_arenas_i_node;
1708 label_return:
1709 	malloc_mutex_unlock(&ctl_mtx);
1710 	return (ret);
1711 }
1712