1 #define	JEMALLOC_PROF_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 /******************************************************************************/
4 
5 #ifdef JEMALLOC_PROF_LIBUNWIND
6 #define	UNW_LOCAL_ONLY
7 #include <libunwind.h>
8 #endif
9 
10 #ifdef JEMALLOC_PROF_LIBGCC
11 #include <unwind.h>
12 #endif
13 
14 /******************************************************************************/
15 /* Data. */
16 
17 bool		opt_prof = false;
18 bool		opt_prof_active = true;
19 bool		opt_prof_thread_active_init = true;
20 size_t		opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
21 ssize_t		opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
22 bool		opt_prof_gdump = false;
23 bool		opt_prof_final = false;
24 bool		opt_prof_leak = false;
25 bool		opt_prof_accum = false;
26 char		opt_prof_prefix[
27     /* Minimize memory bloat for non-prof builds. */
28 #ifdef JEMALLOC_PROF
29     PATH_MAX +
30 #endif
31     1];
32 
33 /*
34  * Initialized as opt_prof_active, and accessed via
35  * prof_active_[gs]et{_unlocked,}().
36  */
37 bool			prof_active;
38 static malloc_mutex_t	prof_active_mtx;
39 
40 /*
41  * Initialized as opt_prof_thread_active_init, and accessed via
42  * prof_thread_active_init_[gs]et().
43  */
44 static bool		prof_thread_active_init;
45 static malloc_mutex_t	prof_thread_active_init_mtx;
46 
47 /*
48  * Initialized as opt_prof_gdump, and accessed via
49  * prof_gdump_[gs]et{_unlocked,}().
50  */
51 bool			prof_gdump_val;
52 static malloc_mutex_t	prof_gdump_mtx;
53 
54 uint64_t	prof_interval = 0;
55 
56 size_t		lg_prof_sample;
57 
58 /*
59  * Table of mutexes that are shared among gctx's.  These are leaf locks, so
60  * there is no problem with using them for more than one gctx at the same time.
61  * The primary motivation for this sharing though is that gctx's are ephemeral,
62  * and destroying mutexes causes complications for systems that allocate when
63  * creating/destroying mutexes.
64  */
65 static malloc_mutex_t	*gctx_locks;
66 static unsigned		cum_gctxs; /* Atomic counter. */
67 
68 /*
69  * Table of mutexes that are shared among tdata's.  No operations require
70  * holding multiple tdata locks, so there is no problem with using them for more
71  * than one tdata at the same time, even though a gctx lock may be acquired
72  * while holding a tdata lock.
73  */
74 static malloc_mutex_t	*tdata_locks;
75 
76 /*
77  * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
78  * structure that knows about all backtraces currently captured.
79  */
80 static ckh_t		bt2gctx;
81 static malloc_mutex_t	bt2gctx_mtx;
82 
83 /*
84  * Tree of all extant prof_tdata_t structures, regardless of state,
85  * {attached,detached,expired}.
86  */
87 static prof_tdata_tree_t	tdatas;
88 static malloc_mutex_t	tdatas_mtx;
89 
90 static uint64_t		next_thr_uid;
91 static malloc_mutex_t	next_thr_uid_mtx;
92 
93 static malloc_mutex_t	prof_dump_seq_mtx;
94 static uint64_t		prof_dump_seq;
95 static uint64_t		prof_dump_iseq;
96 static uint64_t		prof_dump_mseq;
97 static uint64_t		prof_dump_useq;
98 
99 /*
100  * This buffer is rather large for stack allocation, so use a single buffer for
101  * all profile dumps.
102  */
103 static malloc_mutex_t	prof_dump_mtx;
104 static char		prof_dump_buf[
105     /* Minimize memory bloat for non-prof builds. */
106 #ifdef JEMALLOC_PROF
107     PROF_DUMP_BUFSIZE
108 #else
109     1
110 #endif
111 ];
112 static size_t		prof_dump_buf_end;
113 static int		prof_dump_fd;
114 
115 /* Do not dump any profiles until bootstrapping is complete. */
116 static bool		prof_booted = false;
117 
118 /******************************************************************************/
119 /*
120  * Function prototypes for static functions that are referenced prior to
121  * definition.
122  */
123 
124 static bool	prof_tctx_should_destroy(prof_tctx_t *tctx);
125 static void	prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
126 static bool	prof_tdata_should_destroy(prof_tdata_t *tdata,
127     bool even_if_attached);
128 static void	prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
129     bool even_if_attached);
130 static char	*prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
131 
132 /******************************************************************************/
133 /* Red-black trees. */
134 
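/*
 * Note on the comparators below: tctx's are ordered by (thr_uid,
 * thr_discrim, tctx_uid).  The (a > b) - (a < b) idiom yields -1/0/1
 * without the overflow risk that naively subtracting uint64_t keys
 * would carry.
 */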
135 JEMALLOC_INLINE_C int
136 prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
137 {
138 	uint64_t a_thr_uid = a->thr_uid;
139 	uint64_t b_thr_uid = b->thr_uid;
140 	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
141 	if (ret == 0) {
142 		uint64_t a_thr_discrim = a->thr_discrim;
143 		uint64_t b_thr_discrim = b->thr_discrim;
144 		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
145 		    b_thr_discrim);
146 		if (ret == 0) {
147 			uint64_t a_tctx_uid = a->tctx_uid;
148 			uint64_t b_tctx_uid = b->tctx_uid;
149 			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
150 			    b_tctx_uid);
151 		}
152 	}
153 	return (ret);
154 }
155 
156 rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
157     tctx_link, prof_tctx_comp)
158 
159 JEMALLOC_INLINE_C int
160 prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
161 {
162 	unsigned a_len = a->bt.len;
163 	unsigned b_len = b->bt.len;
164 	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
165 	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
166 	if (ret == 0)
167 		ret = (a_len > b_len) - (a_len < b_len);
168 	return (ret);
169 }
170 
171 rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
172     prof_gctx_comp)
173 
174 JEMALLOC_INLINE_C int
175 prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
176 {
177 	int ret;
178 	uint64_t a_uid = a->thr_uid;
179 	uint64_t b_uid = b->thr_uid;
180 
181 	ret = ((a_uid > b_uid) - (a_uid < b_uid));
182 	if (ret == 0) {
183 		uint64_t a_discrim = a->thr_discrim;
184 		uint64_t b_discrim = b->thr_discrim;
185 
186 		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
187 	}
188 	return (ret);
189 }
190 
191 rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
192     prof_tdata_comp)
193 
194 /******************************************************************************/
195 
196 void
197 prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
198 {
199 	prof_tdata_t *tdata;
200 
201 	cassert(config_prof);
202 
203 	if (updated) {
204 		/*
205 		 * Compute a new sample threshold.  This isn't very important in
206 		 * practice, because this function is rarely executed, so the
207 		 * potential for sample bias is minimal except in contrived
208 		 * programs.
209 		 */
210 		tdata = prof_tdata_get(tsd, true);
211 		if (tdata != NULL)
212 			prof_sample_threshold_update(tdata);
213 	}
214 
215 	if ((uintptr_t)tctx > (uintptr_t)1U) {
216 		malloc_mutex_lock(tctx->tdata->lock);
217 		tctx->prepared = false;
218 		if (prof_tctx_should_destroy(tctx))
219 			prof_tctx_destroy(tsd, tctx);
220 		else
221 			malloc_mutex_unlock(tctx->tdata->lock);
222 	}
223 }
224 
225 void
226 prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
227 {
228 
229 	prof_tctx_set(ptr, usize, tctx);
230 
231 	malloc_mutex_lock(tctx->tdata->lock);
232 	tctx->cnts.curobjs++;
233 	tctx->cnts.curbytes += usize;
234 	if (opt_prof_accum) {
235 		tctx->cnts.accumobjs++;
236 		tctx->cnts.accumbytes += usize;
237 	}
238 	tctx->prepared = false;
239 	malloc_mutex_unlock(tctx->tdata->lock);
240 }
241 
242 void
243 prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
244 {
245 
246 	malloc_mutex_lock(tctx->tdata->lock);
247 	assert(tctx->cnts.curobjs > 0);
248 	assert(tctx->cnts.curbytes >= usize);
249 	tctx->cnts.curobjs--;
250 	tctx->cnts.curbytes -= usize;
251 
252 	if (prof_tctx_should_destroy(tctx))
253 		prof_tctx_destroy(tsd, tctx);
254 	else
255 		malloc_mutex_unlock(tctx->tdata->lock);
256 }
257 
258 void
259 bt_init(prof_bt_t *bt, void **vec)
260 {
261 
262 	cassert(config_prof);
263 
264 	bt->vec = vec;
265 	bt->len = 0;
266 }
267 
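/*
 * prof_enter()/prof_leave() bracket critical sections that hold bt2gctx_mtx.
 * Interval/growth dump triggers that fire while the lock is held are queued
 * via tdata->enq_{idump,gdump} and serviced in prof_leave(), so that
 * prof_idump()/prof_gdump() never run with bt2gctx_mtx held.
 */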
268 JEMALLOC_INLINE_C void
269 prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
270 {
271 
272 	cassert(config_prof);
273 	assert(tdata == prof_tdata_get(tsd, false));
274 
275 	if (tdata != NULL) {
276 		assert(!tdata->enq);
277 		tdata->enq = true;
278 	}
279 
280 	malloc_mutex_lock(&bt2gctx_mtx);
281 }
282 
283 JEMALLOC_INLINE_C void
284 prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
285 {
286 
287 	cassert(config_prof);
288 	assert(tdata == prof_tdata_get(tsd, false));
289 
290 	malloc_mutex_unlock(&bt2gctx_mtx);
291 
292 	if (tdata != NULL) {
293 		bool idump, gdump;
294 
295 		assert(tdata->enq);
296 		tdata->enq = false;
297 		idump = tdata->enq_idump;
298 		tdata->enq_idump = false;
299 		gdump = tdata->enq_gdump;
300 		tdata->enq_gdump = false;
301 
302 		if (idump)
303 			prof_idump();
304 		if (gdump)
305 			prof_gdump();
306 	}
307 }
308 
309 #ifdef JEMALLOC_PROF_LIBUNWIND
310 void
311 prof_backtrace(prof_bt_t *bt)
312 {
313 	int nframes;
314 
315 	cassert(config_prof);
316 	assert(bt->len == 0);
317 	assert(bt->vec != NULL);
318 
319 	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
320 	if (nframes <= 0)
321 		return;
322 	bt->len = nframes;
323 }
324 #elif (defined(JEMALLOC_PROF_LIBGCC))
325 static _Unwind_Reason_Code
326 prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
327 {
328 
329 	cassert(config_prof);
330 
331 	return (_URC_NO_REASON);
332 }
333 
334 static _Unwind_Reason_Code
335 prof_unwind_callback(struct _Unwind_Context *context, void *arg)
336 {
337 	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
338 	void *ip;
339 
340 	cassert(config_prof);
341 
342 	ip = (void *)_Unwind_GetIP(context);
343 	if (ip == NULL)
344 		return (_URC_END_OF_STACK);
345 	data->bt->vec[data->bt->len] = ip;
346 	data->bt->len++;
347 	if (data->bt->len == data->max)
348 		return (_URC_END_OF_STACK);
349 
350 	return (_URC_NO_REASON);
351 }
352 
353 void
354 prof_backtrace(prof_bt_t *bt)
355 {
356 	prof_unwind_data_t data = {bt, PROF_BT_MAX};
357 
358 	cassert(config_prof);
359 
360 	_Unwind_Backtrace(prof_unwind_callback, &data);
361 }
362 #elif (defined(JEMALLOC_PROF_GCC))
363 void
364 prof_backtrace(prof_bt_t *bt)
365 {
366 #define	BT_FRAME(i)							\
367 	if ((i) < PROF_BT_MAX) {					\
368 		void *p;						\
369 		if (__builtin_frame_address(i) == 0)			\
370 			return;						\
371 		p = __builtin_return_address(i);			\
372 		if (p == NULL)						\
373 			return;						\
374 		bt->vec[(i)] = p;					\
375 		bt->len = (i) + 1;					\
376 	} else								\
377 		return;
378 
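	/*
	 * __builtin_frame_address() and __builtin_return_address() require
	 * compile-time constant arguments, so the stack walk cannot be a
	 * loop; it must be manually unrolled, one BT_FRAME() instantiation
	 * per frame, through BT_FRAME(127) below.
	 */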
379 	cassert(config_prof);
380 
381 	BT_FRAME(0)
382 	BT_FRAME(1)
383 	BT_FRAME(2)
384 	BT_FRAME(3)
385 	BT_FRAME(4)
386 	BT_FRAME(5)
387 	BT_FRAME(6)
388 	BT_FRAME(7)
389 	BT_FRAME(8)
390 	BT_FRAME(9)
391 
392 	BT_FRAME(10)
393 	BT_FRAME(11)
394 	BT_FRAME(12)
395 	BT_FRAME(13)
396 	BT_FRAME(14)
397 	BT_FRAME(15)
398 	BT_FRAME(16)
399 	BT_FRAME(17)
400 	BT_FRAME(18)
401 	BT_FRAME(19)
402 
403 	BT_FRAME(20)
404 	BT_FRAME(21)
405 	BT_FRAME(22)
406 	BT_FRAME(23)
407 	BT_FRAME(24)
408 	BT_FRAME(25)
409 	BT_FRAME(26)
410 	BT_FRAME(27)
411 	BT_FRAME(28)
412 	BT_FRAME(29)
413 
414 	BT_FRAME(30)
415 	BT_FRAME(31)
416 	BT_FRAME(32)
417 	BT_FRAME(33)
418 	BT_FRAME(34)
419 	BT_FRAME(35)
420 	BT_FRAME(36)
421 	BT_FRAME(37)
422 	BT_FRAME(38)
423 	BT_FRAME(39)
424 
425 	BT_FRAME(40)
426 	BT_FRAME(41)
427 	BT_FRAME(42)
428 	BT_FRAME(43)
429 	BT_FRAME(44)
430 	BT_FRAME(45)
431 	BT_FRAME(46)
432 	BT_FRAME(47)
433 	BT_FRAME(48)
434 	BT_FRAME(49)
435 
436 	BT_FRAME(50)
437 	BT_FRAME(51)
438 	BT_FRAME(52)
439 	BT_FRAME(53)
440 	BT_FRAME(54)
441 	BT_FRAME(55)
442 	BT_FRAME(56)
443 	BT_FRAME(57)
444 	BT_FRAME(58)
445 	BT_FRAME(59)
446 
447 	BT_FRAME(60)
448 	BT_FRAME(61)
449 	BT_FRAME(62)
450 	BT_FRAME(63)
451 	BT_FRAME(64)
452 	BT_FRAME(65)
453 	BT_FRAME(66)
454 	BT_FRAME(67)
455 	BT_FRAME(68)
456 	BT_FRAME(69)
457 
458 	BT_FRAME(70)
459 	BT_FRAME(71)
460 	BT_FRAME(72)
461 	BT_FRAME(73)
462 	BT_FRAME(74)
463 	BT_FRAME(75)
464 	BT_FRAME(76)
465 	BT_FRAME(77)
466 	BT_FRAME(78)
467 	BT_FRAME(79)
468 
469 	BT_FRAME(80)
470 	BT_FRAME(81)
471 	BT_FRAME(82)
472 	BT_FRAME(83)
473 	BT_FRAME(84)
474 	BT_FRAME(85)
475 	BT_FRAME(86)
476 	BT_FRAME(87)
477 	BT_FRAME(88)
478 	BT_FRAME(89)
479 
480 	BT_FRAME(90)
481 	BT_FRAME(91)
482 	BT_FRAME(92)
483 	BT_FRAME(93)
484 	BT_FRAME(94)
485 	BT_FRAME(95)
486 	BT_FRAME(96)
487 	BT_FRAME(97)
488 	BT_FRAME(98)
489 	BT_FRAME(99)
490 
491 	BT_FRAME(100)
492 	BT_FRAME(101)
493 	BT_FRAME(102)
494 	BT_FRAME(103)
495 	BT_FRAME(104)
496 	BT_FRAME(105)
497 	BT_FRAME(106)
498 	BT_FRAME(107)
499 	BT_FRAME(108)
500 	BT_FRAME(109)
501 
502 	BT_FRAME(110)
503 	BT_FRAME(111)
504 	BT_FRAME(112)
505 	BT_FRAME(113)
506 	BT_FRAME(114)
507 	BT_FRAME(115)
508 	BT_FRAME(116)
509 	BT_FRAME(117)
510 	BT_FRAME(118)
511 	BT_FRAME(119)
512 
513 	BT_FRAME(120)
514 	BT_FRAME(121)
515 	BT_FRAME(122)
516 	BT_FRAME(123)
517 	BT_FRAME(124)
518 	BT_FRAME(125)
519 	BT_FRAME(126)
520 	BT_FRAME(127)
521 #undef BT_FRAME
522 }
523 #else
524 void
525 prof_backtrace(prof_bt_t *bt)
526 {
527 
528 	cassert(config_prof);
529 	not_reached();
530 }
531 #endif
532 
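/*
 * Assign gctx locks round-robin via an atomic counter, so that lock
 * assignment needs no additional synchronization and contention is spread
 * across the PROF_NCTX_LOCKS mutexes.
 */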
533 static malloc_mutex_t *
534 prof_gctx_mutex_choose(void)
535 {
536 	unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
537 
538 	return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
539 }
540 
541 static malloc_mutex_t *
542 prof_tdata_mutex_choose(uint64_t thr_uid)
543 {
544 
545 	return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
546 }
547 
548 static prof_gctx_t *
549 prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
550 {
551 	/*
552 	 * Create a single allocation that has space for vec of length bt->len.
553 	 */
554 	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
555 	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size,
556 	    size2index(size), false, tcache_get(tsd, true), true, NULL, true);
557 	if (gctx == NULL)
558 		return (NULL);
559 	gctx->lock = prof_gctx_mutex_choose();
560 	/*
561 	 * Set nlimbo to 1, in order to avoid a race condition with
562 	 * prof_tctx_destroy()/prof_gctx_try_destroy().
563 	 */
564 	gctx->nlimbo = 1;
565 	tctx_tree_new(&gctx->tctxs);
566 	/* Duplicate bt. */
567 	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
568 	gctx->bt.vec = gctx->vec;
569 	gctx->bt.len = bt->len;
570 	return (gctx);
571 }
572 
573 static void
574 prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
575     prof_tdata_t *tdata)
576 {
577 
578 	cassert(config_prof);
579 
580 	/*
581 	 * Check that gctx is still unused by any thread cache before destroying
582 	 * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
583 	 * condition with this function, as does prof_tctx_destroy() in order to
584 	 * avoid a race between the main body of prof_tctx_destroy() and entry
585 	 * into this function.
586 	 */
587 	prof_enter(tsd, tdata_self);
588 	malloc_mutex_lock(gctx->lock);
589 	assert(gctx->nlimbo != 0);
590 	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
591 		/* Remove gctx from bt2gctx. */
592 		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
593 			not_reached();
594 		prof_leave(tsd, tdata_self);
595 		/* Destroy gctx. */
596 		malloc_mutex_unlock(gctx->lock);
597 		idalloctm(tsd, gctx, tcache_get(tsd, false), true, true);
598 	} else {
599 		/*
600 		 * Compensate for increment in prof_tctx_destroy() or
601 		 * prof_lookup().
602 		 */
603 		gctx->nlimbo--;
604 		malloc_mutex_unlock(gctx->lock);
605 		prof_leave(tsd, tdata_self);
606 	}
607 }
608 
609 /* tctx->tdata->lock must be held. */
610 static bool
611 prof_tctx_should_destroy(prof_tctx_t *tctx)
612 {
613 
614 	if (opt_prof_accum)
615 		return (false);
616 	if (tctx->cnts.curobjs != 0)
617 		return (false);
618 	if (tctx->prepared)
619 		return (false);
620 	return (true);
621 }
622 
623 static bool
624 prof_gctx_should_destroy(prof_gctx_t *gctx)
625 {
626 
627 	if (opt_prof_accum)
628 		return (false);
629 	if (!tctx_tree_empty(&gctx->tctxs))
630 		return (false);
631 	if (gctx->nlimbo != 0)
632 		return (false);
633 	return (true);
634 }
635 
636 /* tctx->tdata->lock is held upon entry, and released before return. */
637 static void
638 prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
639 {
640 	prof_tdata_t *tdata = tctx->tdata;
641 	prof_gctx_t *gctx = tctx->gctx;
642 	bool destroy_tdata, destroy_tctx, destroy_gctx;
643 
644 	assert(tctx->cnts.curobjs == 0);
645 	assert(tctx->cnts.curbytes == 0);
646 	assert(!opt_prof_accum);
647 	assert(tctx->cnts.accumobjs == 0);
648 	assert(tctx->cnts.accumbytes == 0);
649 
650 	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
651 	destroy_tdata = prof_tdata_should_destroy(tdata, false);
652 	malloc_mutex_unlock(tdata->lock);
653 
654 	malloc_mutex_lock(gctx->lock);
655 	switch (tctx->state) {
656 	case prof_tctx_state_nominal:
657 		tctx_tree_remove(&gctx->tctxs, tctx);
658 		destroy_tctx = true;
659 		if (prof_gctx_should_destroy(gctx)) {
660 			/*
661 			 * Increment gctx->nlimbo in order to keep another
662 			 * thread from winning the race to destroy gctx while
663 			 * this one has gctx->lock dropped.  Without this, it
664 			 * would be possible for another thread to:
665 			 *
666 			 * 1) Sample an allocation associated with gctx.
667 			 * 2) Deallocate the sampled object.
668 			 * 3) Successfully prof_gctx_try_destroy(gctx).
669 			 *
670 			 * The result would be that gctx no longer exists by the
671 			 * time this thread accesses it in
672 			 * prof_gctx_try_destroy().
673 			 */
674 			gctx->nlimbo++;
675 			destroy_gctx = true;
676 		} else
677 			destroy_gctx = false;
678 		break;
679 	case prof_tctx_state_dumping:
680 		/*
681 		 * A dumping thread needs tctx to remain valid until dumping
682 		 * has finished.  Change state such that the dumping thread will
683 		 * complete destruction during a late dump iteration phase.
684 		 */
685 		tctx->state = prof_tctx_state_purgatory;
686 		destroy_tctx = false;
687 		destroy_gctx = false;
688 		break;
689 	default:
690 		not_reached();
691 		destroy_tctx = false;
692 		destroy_gctx = false;
693 	}
694 	malloc_mutex_unlock(gctx->lock);
695 	if (destroy_gctx) {
696 		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
697 		    tdata);
698 	}
699 
700 	if (destroy_tdata)
701 		prof_tdata_destroy(tsd, tdata, false);
702 
703 	if (destroy_tctx)
704 		idalloctm(tsd, tctx, tcache_get(tsd, false), true, true);
705 }
706 
707 static bool
708 prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
709     void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
710 {
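	/*
	 * The unions below let ckh_search()/ckh_insert() traffic in void *
	 * while this function manipulates typed pointers, avoiding casts
	 * between incompatible pointer-to-pointer types.
	 */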
711 	union {
712 		prof_gctx_t	*p;
713 		void		*v;
714 	} gctx;
715 	union {
716 		prof_bt_t	*p;
717 		void		*v;
718 	} btkey;
719 	bool new_gctx;
720 
721 	prof_enter(tsd, tdata);
722 	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
723 		/* bt has never been seen before.  Insert it. */
724 		gctx.p = prof_gctx_create(tsd, bt);
725 		if (gctx.v == NULL) {
726 			prof_leave(tsd, tdata);
727 			return (true);
728 		}
729 		btkey.p = &gctx.p->bt;
730 		if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
731 			/* OOM. */
732 			prof_leave(tsd, tdata);
733 			idalloctm(tsd, gctx.v, tcache_get(tsd, false), true,
734 			    true);
735 			return (true);
736 		}
737 		new_gctx = true;
738 	} else {
739 		/*
740 		 * Increment nlimbo, in order to avoid a race condition with
741 		 * prof_tctx_destroy()/prof_gctx_try_destroy().
742 		 */
743 		malloc_mutex_lock(gctx.p->lock);
744 		gctx.p->nlimbo++;
745 		malloc_mutex_unlock(gctx.p->lock);
746 		new_gctx = false;
747 	}
748 	prof_leave(tsd, tdata);
749 
750 	*p_btkey = btkey.v;
751 	*p_gctx = gctx.p;
752 	*p_new_gctx = new_gctx;
753 	return (false);
754 }
755 
756 prof_tctx_t *
757 prof_lookup(tsd_t *tsd, prof_bt_t *bt)
758 {
759 	union {
760 		prof_tctx_t	*p;
761 		void		*v;
762 	} ret;
763 	prof_tdata_t *tdata;
764 	bool not_found;
765 
766 	cassert(config_prof);
767 
768 	tdata = prof_tdata_get(tsd, false);
769 	if (tdata == NULL)
770 		return (NULL);
771 
772 	malloc_mutex_lock(tdata->lock);
773 	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
774 	if (!not_found) /* Note double negative! */
775 		ret.p->prepared = true;
776 	malloc_mutex_unlock(tdata->lock);
777 	if (not_found) {
778 		tcache_t *tcache;
779 		void *btkey;
780 		prof_gctx_t *gctx;
781 		bool new_gctx, error;
782 
783 		/*
784 		 * This thread's cache lacks bt.  Look for it in the global
785 		 * cache.
786 		 */
787 		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
788 		    &new_gctx))
789 			return (NULL);
790 
791 		/* Link a prof_tctx_t into gctx for this thread. */
792 		tcache = tcache_get(tsd, true);
793 		ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
794 		    size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL,
795 		    true);
796 		if (ret.p == NULL) {
797 			if (new_gctx)
798 				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
799 			return (NULL);
800 		}
801 		ret.p->tdata = tdata;
802 		ret.p->thr_uid = tdata->thr_uid;
803 		ret.p->thr_discrim = tdata->thr_discrim;
804 		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
805 		ret.p->gctx = gctx;
806 		ret.p->tctx_uid = tdata->tctx_uid_next++;
807 		ret.p->prepared = true;
808 		ret.p->state = prof_tctx_state_initializing;
809 		malloc_mutex_lock(tdata->lock);
810 		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
811 		malloc_mutex_unlock(tdata->lock);
812 		if (error) {
813 			if (new_gctx)
814 				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
815 			idalloctm(tsd, ret.v, tcache, true, true);
816 			return (NULL);
817 		}
818 		malloc_mutex_lock(gctx->lock);
819 		ret.p->state = prof_tctx_state_nominal;
820 		tctx_tree_insert(&gctx->tctxs, ret.p);
821 		gctx->nlimbo--;
822 		malloc_mutex_unlock(gctx->lock);
823 	}
824 
825 	return (ret.p);
826 }
827 
828 void
829 prof_sample_threshold_update(prof_tdata_t *tdata)
830 {
831 	/*
832 	 * The body of this function is compiled out unless heap profiling is
833 	 * enabled, so that it is possible to compile jemalloc with floating
834 	 * point support completely disabled.  Avoiding floating point code is
835 	 * important on memory-constrained systems, but it also enables a
836 	 * workaround for versions of glibc that don't properly save/restore
837 	 * floating point registers during dynamic lazy symbol loading (which
838 	 * internally calls into whatever malloc implementation happens to be
839 	 * integrated into the application).  Note that some compilers (e.g.
840 	 * gcc 4.8) may use floating point registers for fast memory moves, so
841 	 * jemalloc must be compiled with such optimizations disabled (e.g.
842 	 * -mno-sse) in order for the workaround to be complete.
843 	 */
844 #ifdef JEMALLOC_PROF
845 	uint64_t r;
846 	double u;
847 
848 	if (!config_prof)
849 		return;
850 
851 	if (lg_prof_sample == 0) {
852 		tdata->bytes_until_sample = 0;
853 		return;
854 	}
855 
856 	/*
857 	 * Compute sample interval as a geometrically distributed random
858 	 * variable with mean (2^lg_prof_sample).
859 	 *
860 	 *                             __        __
861 	 *                             |  log(u)  |                     1
862 	 * tdata->bytes_until_sample = | -------- |, where p = ---------------
863 	 *                             | log(1-p) |             lg_prof_sample
864 	 *                                                     2
865 	 *
866 	 * For more information on the math, see:
867 	 *
868 	 *   Non-Uniform Random Variate Generation
869 	 *   Luc Devroye
870 	 *   Springer-Verlag, New York, 1986
871 	 *   pp 500
872 	 *   (http://luc.devroye.org/rnbookindex.html)
873 	 */
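	/*
	 * Illustrative numbers (not part of the algorithm): with the default
	 * lg_prof_sample of 19 (mean 2^19 = 512 KiB) and a draw of u = 0.5,
	 * p = 2^-19 and log(0.5)/log(1 - 2^-19) is approximately 363000, so
	 * roughly 355 KiB of allocation would pass before the next sample.
	 */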
874 	r = prng_lg_range(&tdata->prng_state, 53);
875 	u = (double)r * (1.0/9007199254740992.0L);
876 	tdata->bytes_until_sample = (uint64_t)(log(u) /
877 	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
878 	    + (uint64_t)1U;
879 #endif
880 }
881 
882 #ifdef JEMALLOC_JET
883 static prof_tdata_t *
884 prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
885 {
886 	size_t *tdata_count = (size_t *)arg;
887 
888 	(*tdata_count)++;
889 
890 	return (NULL);
891 }
892 
893 size_t
894 prof_tdata_count(void)
895 {
896 	size_t tdata_count = 0;
897 
898 	malloc_mutex_lock(&tdatas_mtx);
899 	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
900 	    (void *)&tdata_count);
901 	malloc_mutex_unlock(&tdatas_mtx);
902 
903 	return (tdata_count);
904 }
905 #endif
906 
907 #ifdef JEMALLOC_JET
908 size_t
909 prof_bt_count(void)
910 {
911 	size_t bt_count;
912 	tsd_t *tsd;
913 	prof_tdata_t *tdata;
914 
915 	tsd = tsd_fetch();
916 	tdata = prof_tdata_get(tsd, false);
917 	if (tdata == NULL)
918 		return (0);
919 
920 	malloc_mutex_lock(&bt2gctx_mtx);
921 	bt_count = ckh_count(&bt2gctx);
922 	malloc_mutex_unlock(&bt2gctx_mtx);
923 
924 	return (bt_count);
925 }
926 #endif
927 
928 #ifdef JEMALLOC_JET
929 #undef prof_dump_open
930 #define	prof_dump_open JEMALLOC_N(prof_dump_open_impl)
931 #endif
932 static int
933 prof_dump_open(bool propagate_err, const char *filename)
934 {
935 	int fd;
936 
937 	fd = creat(filename, 0644);
938 	if (fd == -1 && !propagate_err) {
939 		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
940 		    filename);
941 		if (opt_abort)
942 			abort();
943 	}
944 
945 	return (fd);
946 }
947 #ifdef JEMALLOC_JET
948 #undef prof_dump_open
949 #define	prof_dump_open JEMALLOC_N(prof_dump_open)
950 prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
951 #endif
952 
953 static bool
954 prof_dump_flush(bool propagate_err)
955 {
956 	bool ret = false;
957 	ssize_t err;
958 
959 	cassert(config_prof);
960 
961 	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
962 	if (err == -1) {
963 		if (!propagate_err) {
964 			malloc_write("<jemalloc>: write() failed during heap "
965 			    "profile flush\n");
966 			if (opt_abort)
967 				abort();
968 		}
969 		ret = true;
970 	}
971 	prof_dump_buf_end = 0;
972 
973 	return (ret);
974 }
975 
976 static bool
977 prof_dump_close(bool propagate_err)
978 {
979 	bool ret;
980 
981 	assert(prof_dump_fd != -1);
982 	ret = prof_dump_flush(propagate_err);
983 	close(prof_dump_fd);
984 	prof_dump_fd = -1;
985 
986 	return (ret);
987 }
988 
989 static bool
990 prof_dump_write(bool propagate_err, const char *s)
991 {
992 	size_t i, slen, n;
993 
994 	cassert(config_prof);
995 
996 	i = 0;
997 	slen = strlen(s);
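	/*
	 * Copy s into the shared dump buffer in chunks, flushing to
	 * prof_dump_fd whenever the buffer fills.  For example, a 64 byte
	 * string arriving with 40 bytes of buffer space left is written as a
	 * 40 byte chunk, a flush, and then a 24 byte chunk.
	 */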
998 	while (i < slen) {
999 		/* Flush the buffer if it is full. */
1000 		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
1001 			if (prof_dump_flush(propagate_err) && propagate_err)
1002 				return (true);
1003 
1004 		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
1005 			/* Finish writing. */
1006 			n = slen - i;
1007 		} else {
1008 			/* Write as much of s as will fit. */
1009 			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
1010 		}
1011 		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
1012 		prof_dump_buf_end += n;
1013 		i += n;
1014 	}
1015 
1016 	return (false);
1017 }
1018 
1019 JEMALLOC_FORMAT_PRINTF(2, 3)
1020 static bool
1021 prof_dump_printf(bool propagate_err, const char *format, ...)
1022 {
1023 	bool ret;
1024 	va_list ap;
1025 	char buf[PROF_PRINTF_BUFSIZE];
1026 
1027 	va_start(ap, format);
1028 	malloc_vsnprintf(buf, sizeof(buf), format, ap);
1029 	va_end(ap);
1030 	ret = prof_dump_write(propagate_err, buf);
1031 
1032 	return (ret);
1033 }
1034 
1035 /* tctx->tdata->lock is held. */
1036 static void
1037 prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
1038 {
1039 
1040 	malloc_mutex_lock(tctx->gctx->lock);
1041 
1042 	switch (tctx->state) {
1043 	case prof_tctx_state_initializing:
1044 		malloc_mutex_unlock(tctx->gctx->lock);
1045 		return;
1046 	case prof_tctx_state_nominal:
1047 		tctx->state = prof_tctx_state_dumping;
1048 		malloc_mutex_unlock(tctx->gctx->lock);
1049 
1050 		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
1051 
1052 		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1053 		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1054 		if (opt_prof_accum) {
1055 			tdata->cnt_summed.accumobjs +=
1056 			    tctx->dump_cnts.accumobjs;
1057 			tdata->cnt_summed.accumbytes +=
1058 			    tctx->dump_cnts.accumbytes;
1059 		}
1060 		break;
1061 	case prof_tctx_state_dumping:
1062 	case prof_tctx_state_purgatory:
1063 		not_reached();
1064 	}
1065 }
1066 
1067 /* gctx->lock is held. */
1068 static void
1069 prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
1070 {
1071 
1072 	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1073 	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1074 	if (opt_prof_accum) {
1075 		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1076 		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1077 	}
1078 }
1079 
1080 /* tctx->gctx->lock is held. */
1081 static prof_tctx_t *
1082 prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1083 {
1084 
1085 	switch (tctx->state) {
1086 	case prof_tctx_state_nominal:
1087 		/* New since dumping started; ignore. */
1088 		break;
1089 	case prof_tctx_state_dumping:
1090 	case prof_tctx_state_purgatory:
1091 		prof_tctx_merge_gctx(tctx, tctx->gctx);
1092 		break;
1093 	default:
1094 		not_reached();
1095 	}
1096 
1097 	return (NULL);
1098 }
1099 
1100 /* gctx->lock is held. */
1101 static prof_tctx_t *
1102 prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1103 {
1104 	bool propagate_err = *(bool *)arg;
1105 
1106 	switch (tctx->state) {
1107 	case prof_tctx_state_initializing:
1108 	case prof_tctx_state_nominal:
1109 		/* Not captured by this dump. */
1110 		break;
1111 	case prof_tctx_state_dumping:
1112 	case prof_tctx_state_purgatory:
1113 		if (prof_dump_printf(propagate_err,
1114 		    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1115 		    "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1116 		    tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
1117 		    tctx->dump_cnts.accumbytes))
1118 			return (tctx);
1119 		break;
1120 	default:
1121 		not_reached();
1122 	}
1123 	return (NULL);
1124 }
1125 
1126 /* tctx->gctx->lock is held. */
1127 static prof_tctx_t *
1128 prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1129 {
1130 	prof_tctx_t *ret;
1131 
1132 	switch (tctx->state) {
1133 	case prof_tctx_state_nominal:
1134 		/* New since dumping started; ignore. */
1135 		break;
1136 	case prof_tctx_state_dumping:
1137 		tctx->state = prof_tctx_state_nominal;
1138 		break;
1139 	case prof_tctx_state_purgatory:
1140 		ret = tctx;
1141 		goto label_return;
1142 	default:
1143 		not_reached();
1144 	}
1145 
1146 	ret = NULL;
1147 label_return:
1148 	return (ret);
1149 }
1150 
1151 static void
1152 prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
1153 {
1154 
1155 	cassert(config_prof);
1156 
1157 	malloc_mutex_lock(gctx->lock);
1158 
1159 	/*
1160 	 * Increment nlimbo so that gctx won't go away before dump.
1161 	 * Additionally, link gctx into the dump list so that it is included in
1162 	 * prof_dump()'s second pass.
1163 	 */
1164 	gctx->nlimbo++;
1165 	gctx_tree_insert(gctxs, gctx);
1166 
1167 	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
1168 
1169 	malloc_mutex_unlock(gctx->lock);
1170 }
1171 
1172 static prof_gctx_t *
1173 prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
1174 {
1175 	size_t *leak_ngctx = (size_t *)arg;
1176 
1177 	malloc_mutex_lock(gctx->lock);
1178 	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
1179 	if (gctx->cnt_summed.curobjs != 0)
1180 		(*leak_ngctx)++;
1181 	malloc_mutex_unlock(gctx->lock);
1182 
1183 	return (NULL);
1184 }
1185 
1186 static void
1187 prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
1188 {
1189 	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
1190 	prof_gctx_t *gctx;
1191 
1192 	/*
1193 	 * Standard tree iteration won't work here, because as soon as we
1194 	 * decrement gctx->nlimbo and unlock gctx, another thread can
1195 	 * concurrently destroy it, which will corrupt the tree.  Therefore,
1196 	 * tear down the tree one node at a time during iteration.
1197 	 */
1198 	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
1199 		gctx_tree_remove(gctxs, gctx);
1200 		malloc_mutex_lock(gctx->lock);
1201 		{
1202 			prof_tctx_t *next;
1203 
1204 			next = NULL;
1205 			do {
1206 				prof_tctx_t *to_destroy =
1207 				    tctx_tree_iter(&gctx->tctxs, next,
1208 				    prof_tctx_finish_iter, NULL);
1209 				if (to_destroy != NULL) {
1210 					next = tctx_tree_next(&gctx->tctxs,
1211 					    to_destroy);
1212 					tctx_tree_remove(&gctx->tctxs,
1213 					    to_destroy);
1214 					idalloctm(tsd, to_destroy,
1215 					    tcache_get(tsd, false), true, true);
1216 				} else
1217 					next = NULL;
1218 			} while (next != NULL);
1219 		}
1220 		gctx->nlimbo--;
1221 		if (prof_gctx_should_destroy(gctx)) {
1222 			gctx->nlimbo++;
1223 			malloc_mutex_unlock(gctx->lock);
1224 			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
1225 		} else
1226 			malloc_mutex_unlock(gctx->lock);
1227 	}
1228 }
1229 
1230 static prof_tdata_t *
1231 prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1232 {
1233 	prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
1234 
1235 	malloc_mutex_lock(tdata->lock);
1236 	if (!tdata->expired) {
1237 		size_t tabind;
1238 		union {
1239 			prof_tctx_t	*p;
1240 			void		*v;
1241 		} tctx;
1242 
1243 		tdata->dumping = true;
1244 		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
1245 		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
1246 		    &tctx.v);)
1247 			prof_tctx_merge_tdata(tctx.p, tdata);
1248 
1249 		cnt_all->curobjs += tdata->cnt_summed.curobjs;
1250 		cnt_all->curbytes += tdata->cnt_summed.curbytes;
1251 		if (opt_prof_accum) {
1252 			cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
1253 			cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
1254 		}
1255 	} else
1256 		tdata->dumping = false;
1257 	malloc_mutex_unlock(tdata->lock);
1258 
1259 	return (NULL);
1260 }
1261 
1262 static prof_tdata_t *
1263 prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1264 {
1265 	bool propagate_err = *(bool *)arg;
1266 
1267 	if (!tdata->dumping)
1268 		return (NULL);
1269 
1270 	if (prof_dump_printf(propagate_err,
1271 	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
1272 	    tdata->thr_uid, tdata->cnt_summed.curobjs,
1273 	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
1274 	    tdata->cnt_summed.accumbytes,
1275 	    (tdata->thread_name != NULL) ? " " : "",
1276 	    (tdata->thread_name != NULL) ? tdata->thread_name : ""))
1277 		return (tdata);
1278 	return (NULL);
1279 }
1280 
1281 #ifdef JEMALLOC_JET
1282 #undef prof_dump_header
1283 #define	prof_dump_header JEMALLOC_N(prof_dump_header_impl)
1284 #endif
1285 static bool
1286 prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
1287 {
1288 	bool ret;
1289 
1290 	if (prof_dump_printf(propagate_err,
1291 	    "heap_v2/%"FMTu64"\n"
1292 	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
1293 	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
1294 	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
1295 		return (true);
1296 
1297 	malloc_mutex_lock(&tdatas_mtx);
1298 	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1299 	    (void *)&propagate_err) != NULL);
1300 	malloc_mutex_unlock(&tdatas_mtx);
1301 	return (ret);
1302 }
1303 #ifdef JEMALLOC_JET
1304 #undef prof_dump_header
1305 #define	prof_dump_header JEMALLOC_N(prof_dump_header)
1306 prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
1307 #endif
1308 
1309 /* gctx->lock is held. */
1310 static bool
1311 prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
1312     prof_gctx_tree_t *gctxs)
1313 {
1314 	bool ret;
1315 	unsigned i;
1316 
1317 	cassert(config_prof);
1318 
1319 	/* Avoid dumping gctx's that have no useful data. */
1320 	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
1321 	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
1322 		assert(gctx->cnt_summed.curobjs == 0);
1323 		assert(gctx->cnt_summed.curbytes == 0);
1324 		assert(gctx->cnt_summed.accumobjs == 0);
1325 		assert(gctx->cnt_summed.accumbytes == 0);
1326 		ret = false;
1327 		goto label_return;
1328 	}
1329 
1330 	if (prof_dump_printf(propagate_err, "@")) {
1331 		ret = true;
1332 		goto label_return;
1333 	}
1334 	for (i = 0; i < bt->len; i++) {
1335 		if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
1336 		    (uintptr_t)bt->vec[i])) {
1337 			ret = true;
1338 			goto label_return;
1339 		}
1340 	}
1341 
1342 	if (prof_dump_printf(propagate_err,
1343 	    "\n"
1344 	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
1345 	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
1346 	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
1347 		ret = true;
1348 		goto label_return;
1349 	}
1350 
1351 	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
1352 	    (void *)&propagate_err) != NULL) {
1353 		ret = true;
1354 		goto label_return;
1355 	}
1356 
1357 	ret = false;
1358 label_return:
1359 	return (ret);
1360 }
1361 
1362 #ifndef _WIN32
1363 JEMALLOC_FORMAT_PRINTF(1, 2)
1364 static int
1365 prof_open_maps(const char *format, ...)
1366 {
1367 	int mfd;
1368 	va_list ap;
1369 	char filename[PATH_MAX + 1];
1370 
1371 	va_start(ap, format);
1372 	malloc_vsnprintf(filename, sizeof(filename), format, ap);
1373 	va_end(ap);
1374 	mfd = open(filename, O_RDONLY);
1375 
1376 	return (mfd);
1377 }
1378 #endif
1379 
1380 static int
1381 prof_getpid(void)
1382 {
1383 
1384 #ifdef _WIN32
1385 	return (GetCurrentProcessId());
1386 #else
1387 	return (getpid());
1388 #endif
1389 }
1390 
1391 static bool
1392 prof_dump_maps(bool propagate_err)
1393 {
1394 	bool ret;
1395 	int mfd;
1396 
1397 	cassert(config_prof);
1398 #ifdef __FreeBSD__
1399 	mfd = prof_open_maps("/proc/curproc/map");
1400 #elif defined(_WIN32)
1401 	mfd = -1; /* Not implemented. */
1402 #else
1403 	{
1404 		int pid = prof_getpid();
1405 
1406 		mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
1407 		if (mfd == -1)
1408 			mfd = prof_open_maps("/proc/%d/maps", pid);
1409 	}
1410 #endif
1411 	if (mfd != -1) {
1412 		ssize_t nread;
1413 
1414 		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
1415 		    propagate_err) {
1416 			ret = true;
1417 			goto label_return;
1418 		}
1419 		nread = 0;
1420 		do {
1421 			prof_dump_buf_end += nread;
1422 			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
1423 				/* Make space in prof_dump_buf before read(). */
1424 				if (prof_dump_flush(propagate_err) &&
1425 				    propagate_err) {
1426 					ret = true;
1427 					goto label_return;
1428 				}
1429 			}
1430 			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
1431 			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
1432 		} while (nread > 0);
1433 	} else {
1434 		ret = true;
1435 		goto label_return;
1436 	}
1437 
1438 	ret = false;
1439 label_return:
1440 	if (mfd != -1)
1441 		close(mfd);
1442 	return (ret);
1443 }
1444 
1445 static void
1446 prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
1447     const char *filename)
1448 {
1449 
1450 	if (cnt_all->curbytes != 0) {
1451 		malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
1452 		    FMTu64" object%s, %zu context%s\n",
1453 		    cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
1454 		    cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
1455 		    leak_ngctx, (leak_ngctx != 1) ? "s" : "");
1456 		malloc_printf(
1457 		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
1458 		    filename);
1459 	}
1460 }
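/*
 * Example of the summary emitted above, with illustrative values:
 *
 *   <jemalloc>: Leak summary: 10240 bytes, 20 objects, 3 contexts
 *   <jemalloc>: Run jeprof on "jeprof.1234.0.f.heap" for leak detail
 */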
1461 
1462 static prof_gctx_t *
1463 prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
1464 {
1465 	prof_gctx_t *ret;
1466 	bool propagate_err = *(bool *)arg;
1467 
1468 	malloc_mutex_lock(gctx->lock);
1469 
1470 	if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
1471 		ret = gctx;
1472 		goto label_return;
1473 	}
1474 
1475 	ret = NULL;
1476 label_return:
1477 	malloc_mutex_unlock(gctx->lock);
1478 	return (ret);
1479 }
1480 
1481 static bool
1482 prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
1483 {
1484 	prof_tdata_t *tdata;
1485 	prof_cnt_t cnt_all;
1486 	size_t tabind;
1487 	union {
1488 		prof_gctx_t	*p;
1489 		void		*v;
1490 	} gctx;
1491 	size_t leak_ngctx;
1492 	prof_gctx_tree_t gctxs;
1493 
1494 	cassert(config_prof);
1495 
1496 	tdata = prof_tdata_get(tsd, true);
1497 	if (tdata == NULL)
1498 		return (true);
1499 
1500 	malloc_mutex_lock(&prof_dump_mtx);
1501 	prof_enter(tsd, tdata);
1502 
1503 	/*
1504 	 * Put gctx's in limbo and clear their counters in preparation for
1505 	 * summing.
1506 	 */
1507 	gctx_tree_new(&gctxs);
1508 	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
1509 		prof_dump_gctx_prep(gctx.p, &gctxs);
1510 
1511 	/*
1512 	 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1513 	 * stats and merge them into the associated gctx's.
1514 	 */
1515 	memset(&cnt_all, 0, sizeof(prof_cnt_t));
1516 	malloc_mutex_lock(&tdatas_mtx);
1517 	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
1518 	malloc_mutex_unlock(&tdatas_mtx);
1519 
1520 	/* Merge tctx stats into gctx's. */
1521 	leak_ngctx = 0;
1522 	gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
1523 
1524 	prof_leave(tsd, tdata);
1525 
1526 	/* Create dump file. */
1527 	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
1528 		goto label_open_close_error;
1529 
1530 	/* Dump profile header. */
1531 	if (prof_dump_header(propagate_err, &cnt_all))
1532 		goto label_write_error;
1533 
1534 	/* Dump per gctx profile stats. */
1535 	if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
1536 	    (void *)&propagate_err) != NULL)
1537 		goto label_write_error;
1538 
1539 	/* Dump /proc/<pid>/maps if possible. */
1540 	if (prof_dump_maps(propagate_err))
1541 		goto label_write_error;
1542 
1543 	if (prof_dump_close(propagate_err))
1544 		goto label_open_close_error;
1545 
1546 	prof_gctx_finish(tsd, &gctxs);
1547 	malloc_mutex_unlock(&prof_dump_mtx);
1548 
1549 	if (leakcheck)
1550 		prof_leakcheck(&cnt_all, leak_ngctx, filename);
1551 
1552 	return (false);
1553 label_write_error:
1554 	prof_dump_close(propagate_err);
1555 label_open_close_error:
1556 	prof_gctx_finish(tsd, &gctxs);
1557 	malloc_mutex_unlock(&prof_dump_mtx);
1558 	return (true);
1559 }
1560 
1561 #define	DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
1562 #define	VSEQ_INVALID		UINT64_C(0xffffffffffffffff)
1563 static void
1564 prof_dump_filename(char *filename, char v, uint64_t vseq)
1565 {
1566 
1567 	cassert(config_prof);
1568 
1569 	if (vseq != VSEQ_INVALID) {
1570 	        /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
1571 		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
1572 		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
1573 		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
1574 	} else {
1575 	        /* "<prefix>.<pid>.<seq>.<v>.heap" */
1576 		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
1577 		    "%s.%d.%"FMTu64".%c.heap",
1578 		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
1579 	}
1580 	prof_dump_seq++;
1581 }
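/*
 * For example, assuming the default "jeprof" prefix and pid 1234, the final
 * dump would be named "jeprof.1234.0.f.heap" and the second interval-triggered
 * dump "jeprof.1234.1.i1.heap" (sequence numbers here are illustrative).
 */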
1582 
1583 static void
1584 prof_fdump(void)
1585 {
1586 	tsd_t *tsd;
1587 	char filename[DUMP_FILENAME_BUFSIZE];
1588 
1589 	cassert(config_prof);
1590 	assert(opt_prof_final);
1591 	assert(opt_prof_prefix[0] != '\0');
1592 
1593 	if (!prof_booted)
1594 		return;
1595 	tsd = tsd_fetch();
1596 
1597 	malloc_mutex_lock(&prof_dump_seq_mtx);
1598 	prof_dump_filename(filename, 'f', VSEQ_INVALID);
1599 	malloc_mutex_unlock(&prof_dump_seq_mtx);
1600 	prof_dump(tsd, false, filename, opt_prof_leak);
1601 }
1602 
1603 void
1604 prof_idump(void)
1605 {
1606 	tsd_t *tsd;
1607 	prof_tdata_t *tdata;
1608 
1609 	cassert(config_prof);
1610 
1611 	if (!prof_booted)
1612 		return;
1613 	tsd = tsd_fetch();
1614 	tdata = prof_tdata_get(tsd, false);
1615 	if (tdata == NULL)
1616 		return;
1617 	if (tdata->enq) {
1618 		tdata->enq_idump = true;
1619 		return;
1620 	}
1621 
1622 	if (opt_prof_prefix[0] != '\0') {
1623 		char filename[PATH_MAX + 1];
1624 		malloc_mutex_lock(&prof_dump_seq_mtx);
1625 		prof_dump_filename(filename, 'i', prof_dump_iseq);
1626 		prof_dump_iseq++;
1627 		malloc_mutex_unlock(&prof_dump_seq_mtx);
1628 		prof_dump(tsd, false, filename, false);
1629 	}
1630 }
1631 
1632 bool
1633 prof_mdump(const char *filename)
1634 {
1635 	tsd_t *tsd;
1636 	char filename_buf[DUMP_FILENAME_BUFSIZE];
1637 
1638 	cassert(config_prof);
1639 
1640 	if (!opt_prof || !prof_booted)
1641 		return (true);
1642 	tsd = tsd_fetch();
1643 
1644 	if (filename == NULL) {
1645 		/* No filename specified, so automatically generate one. */
1646 		if (opt_prof_prefix[0] == '\0')
1647 			return (true);
1648 		malloc_mutex_lock(&prof_dump_seq_mtx);
1649 		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1650 		prof_dump_mseq++;
1651 		malloc_mutex_unlock(&prof_dump_seq_mtx);
1652 		filename = filename_buf;
1653 	}
1654 	return (prof_dump(tsd, true, filename, false));
1655 }
1656 
1657 void
1658 prof_gdump(void)
1659 {
1660 	tsd_t *tsd;
1661 	prof_tdata_t *tdata;
1662 
1663 	cassert(config_prof);
1664 
1665 	if (!prof_booted)
1666 		return;
1667 	tsd = tsd_fetch();
1668 	tdata = prof_tdata_get(tsd, false);
1669 	if (tdata == NULL)
1670 		return;
1671 	if (tdata->enq) {
1672 		tdata->enq_gdump = true;
1673 		return;
1674 	}
1675 
1676 	if (opt_prof_prefix[0] != '\0') {
1677 		char filename[DUMP_FILENAME_BUFSIZE];
1678 		malloc_mutex_lock(&prof_dump_seq_mtx);
1679 		prof_dump_filename(filename, 'u', prof_dump_useq);
1680 		prof_dump_useq++;
1681 		malloc_mutex_unlock(&prof_dump_seq_mtx);
1682 		prof_dump(tsd, false, filename, false);
1683 	}
1684 }
1685 
1686 static void
1687 prof_bt_hash(const void *key, size_t r_hash[2])
1688 {
1689 	prof_bt_t *bt = (prof_bt_t *)key;
1690 
1691 	cassert(config_prof);
1692 
1693 	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
1694 }
1695 
1696 static bool
1697 prof_bt_keycomp(const void *k1, const void *k2)
1698 {
1699 	const prof_bt_t *bt1 = (prof_bt_t *)k1;
1700 	const prof_bt_t *bt2 = (prof_bt_t *)k2;
1701 
1702 	cassert(config_prof);
1703 
1704 	if (bt1->len != bt2->len)
1705 		return (false);
1706 	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1707 }
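/*
 * prof_bt_hash() and prof_bt_keycomp() together define backtrace identity for
 * the cuckoo hashes (bt2gctx, bt2tctx): two backtraces are equal iff they have
 * the same length and identical return addresses.
 */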
1708 
1709 JEMALLOC_INLINE_C uint64_t
1710 prof_thr_uid_alloc(void)
1711 {
1712 	uint64_t thr_uid;
1713 
1714 	malloc_mutex_lock(&next_thr_uid_mtx);
1715 	thr_uid = next_thr_uid;
1716 	next_thr_uid++;
1717 	malloc_mutex_unlock(&next_thr_uid_mtx);
1718 
1719 	return (thr_uid);
1720 }
1721 
1722 static prof_tdata_t *
1723 prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
1724     char *thread_name, bool active)
1725 {
1726 	prof_tdata_t *tdata;
1727 	tcache_t *tcache;
1728 
1729 	cassert(config_prof);
1730 
1731 	/* Initialize an empty cache for this thread. */
1732 	tcache = tcache_get(tsd, true);
1733 	tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
1734 	    size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true);
1735 	if (tdata == NULL)
1736 		return (NULL);
1737 
1738 	tdata->lock = prof_tdata_mutex_choose(thr_uid);
1739 	tdata->thr_uid = thr_uid;
1740 	tdata->thr_discrim = thr_discrim;
1741 	tdata->thread_name = thread_name;
1742 	tdata->attached = true;
1743 	tdata->expired = false;
1744 	tdata->tctx_uid_next = 0;
1745 
1746 	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
1747 	    prof_bt_hash, prof_bt_keycomp)) {
1748 		idalloctm(tsd, tdata, tcache, true, true);
1749 		return (NULL);
1750 	}
1751 
1752 	tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1753 	prof_sample_threshold_update(tdata);
1754 
1755 	tdata->enq = false;
1756 	tdata->enq_idump = false;
1757 	tdata->enq_gdump = false;
1758 
1759 	tdata->dumping = false;
1760 	tdata->active = active;
1761 
1762 	malloc_mutex_lock(&tdatas_mtx);
1763 	tdata_tree_insert(&tdatas, tdata);
1764 	malloc_mutex_unlock(&tdatas_mtx);
1765 
1766 	return (tdata);
1767 }
1768 
1769 prof_tdata_t *
1770 prof_tdata_init(tsd_t *tsd)
1771 {
1772 
1773 	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
1774 	    prof_thread_active_init_get()));
1775 }
1776 
1777 /* tdata->lock must be held. */
1778 static bool
1779 prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
1780 {
1781 
1782 	if (tdata->attached && !even_if_attached)
1783 		return (false);
1784 	if (ckh_count(&tdata->bt2tctx) != 0)
1785 		return (false);
1786 	return (true);
1787 }
1788 
1789 /* tdatas_mtx must be held. */
1790 static void
1791 prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
1792     bool even_if_attached)
1793 {
1794 	tcache_t *tcache;
1795 
1796 	assert(prof_tdata_should_destroy(tdata, even_if_attached));
1797 	assert(tsd_prof_tdata_get(tsd) != tdata);
1798 
1799 	tdata_tree_remove(&tdatas, tdata);
1800 
1801 	tcache = tcache_get(tsd, false);
1802 	if (tdata->thread_name != NULL)
1803 		idalloctm(tsd, tdata->thread_name, tcache, true, true);
1804 	ckh_delete(tsd, &tdata->bt2tctx);
1805 	idalloctm(tsd, tdata, tcache, true, true);
1806 }
1807 
1808 static void
1809 prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
1810 {
1811 
1812 	malloc_mutex_lock(&tdatas_mtx);
1813 	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
1814 	malloc_mutex_unlock(&tdatas_mtx);
1815 }
1816 
1817 static void
1818 prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
1819 {
1820 	bool destroy_tdata;
1821 
1822 	malloc_mutex_lock(tdata->lock);
1823 	if (tdata->attached) {
1824 		destroy_tdata = prof_tdata_should_destroy(tdata, true);
1825 		/*
1826 		 * Only detach if !destroy_tdata, because detaching would allow
1827 		 * another thread to win the race to destroy tdata.
1828 		 */
1829 		if (!destroy_tdata)
1830 			tdata->attached = false;
1831 		tsd_prof_tdata_set(tsd, NULL);
1832 	} else
1833 		destroy_tdata = false;
1834 	malloc_mutex_unlock(tdata->lock);
1835 	if (destroy_tdata)
1836 		prof_tdata_destroy(tsd, tdata, true);
1837 }
1838 
1839 prof_tdata_t *
1840 prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
1841 {
1842 	uint64_t thr_uid = tdata->thr_uid;
1843 	uint64_t thr_discrim = tdata->thr_discrim + 1;
1844 	char *thread_name = (tdata->thread_name != NULL) ?
1845 	    prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
1846 	bool active = tdata->active;
1847 
1848 	prof_tdata_detach(tsd, tdata);
1849 	return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
1850 	    active));
1851 }
1852 
1853 static bool
1854 prof_tdata_expire(prof_tdata_t *tdata)
1855 {
1856 	bool destroy_tdata;
1857 
1858 	malloc_mutex_lock(tdata->lock);
1859 	if (!tdata->expired) {
1860 		tdata->expired = true;
1861 		destroy_tdata = tdata->attached ? false :
1862 		    prof_tdata_should_destroy(tdata, false);
1863 	} else
1864 		destroy_tdata = false;
1865 	malloc_mutex_unlock(tdata->lock);
1866 
1867 	return (destroy_tdata);
1868 }
1869 
1870 static prof_tdata_t *
1871 prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1872 {
1873 
1874 	return (prof_tdata_expire(tdata) ? tdata : NULL);
1875 }
1876 
1877 void
1878 prof_reset(tsd_t *tsd, size_t lg_sample)
1879 {
1880 	prof_tdata_t *next;
1881 
1882 	assert(lg_sample < (sizeof(uint64_t) << 3));
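	/* lg_sample must be < 64 so that 1ULL << lg_sample is well defined. */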
1883 
1884 	malloc_mutex_lock(&prof_dump_mtx);
1885 	malloc_mutex_lock(&tdatas_mtx);
1886 
1887 	lg_prof_sample = lg_sample;
1888 
1889 	next = NULL;
1890 	do {
1891 		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
1892 		    prof_tdata_reset_iter, NULL);
1893 		if (to_destroy != NULL) {
1894 			next = tdata_tree_next(&tdatas, to_destroy);
1895 			prof_tdata_destroy_locked(tsd, to_destroy, false);
1896 		} else
1897 			next = NULL;
1898 	} while (next != NULL);
1899 
1900 	malloc_mutex_unlock(&tdatas_mtx);
1901 	malloc_mutex_unlock(&prof_dump_mtx);
1902 }
1903 
1904 void
1905 prof_tdata_cleanup(tsd_t *tsd)
1906 {
1907 	prof_tdata_t *tdata;
1908 
1909 	if (!config_prof)
1910 		return;
1911 
1912 	tdata = tsd_prof_tdata_get(tsd);
1913 	if (tdata != NULL)
1914 		prof_tdata_detach(tsd, tdata);
1915 }
1916 
1917 bool
1918 prof_active_get(void)
1919 {
1920 	bool prof_active_current;
1921 
1922 	malloc_mutex_lock(&prof_active_mtx);
1923 	prof_active_current = prof_active;
1924 	malloc_mutex_unlock(&prof_active_mtx);
1925 	return (prof_active_current);
1926 }
1927 
1928 bool
1929 prof_active_set(bool active)
1930 {
1931 	bool prof_active_old;
1932 
1933 	malloc_mutex_lock(&prof_active_mtx);
1934 	prof_active_old = prof_active;
1935 	prof_active = active;
1936 	malloc_mutex_unlock(&prof_active_mtx);
1937 	return (prof_active_old);
1938 }
1939 
1940 const char *
1941 prof_thread_name_get(void)
1942 {
1943 	tsd_t *tsd;
1944 	prof_tdata_t *tdata;
1945 
1946 	tsd = tsd_fetch();
1947 	tdata = prof_tdata_get(tsd, true);
1948 	if (tdata == NULL)
1949 		return ("");
1950 	return (tdata->thread_name != NULL ? tdata->thread_name : "");
1951 }
1952 
1953 static char *
prof_thread_name_alloc(tsd_t * tsd,const char * thread_name)1954 prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
1955 {
1956 	char *ret;
1957 	size_t size;
1958 
1959 	if (thread_name == NULL)
1960 		return (NULL);
1961 
1962 	size = strlen(thread_name) + 1;
1963 	if (size == 1)
1964 		return ("");
1965 
1966 	ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd,
1967 	    true), true, NULL, true);
1968 	if (ret == NULL)
1969 		return (NULL);
1970 	memcpy(ret, thread_name, size);
1971 	return (ret);
1972 }
1973 
1974 int
prof_thread_name_set(tsd_t * tsd,const char * thread_name)1975 prof_thread_name_set(tsd_t *tsd, const char *thread_name)
1976 {
1977 	prof_tdata_t *tdata;
1978 	unsigned i;
1979 	char *s;
1980 
1981 	tdata = prof_tdata_get(tsd, true);
1982 	if (tdata == NULL)
1983 		return (EAGAIN);
1984 
1985 	/* Validate input. */
1986 	if (thread_name == NULL)
1987 		return (EFAULT);
1988 	for (i = 0; thread_name[i] != '\0'; i++) {
1989 		char c = thread_name[i];
1990 		if (!isgraph(c) && !isblank(c))
1991 			return (EFAULT);
1992 	}
1993 
1994 	s = prof_thread_name_alloc(tsd, thread_name);
1995 	if (s == NULL)
1996 		return (EAGAIN);
1997 
1998 	if (tdata->thread_name != NULL) {
1999 		idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
2000 		    true, true);
2001 		tdata->thread_name = NULL;
2002 	}
2003 	if (strlen(s) > 0)
2004 		tdata->thread_name = s;
2005 	return (0);
2006 }
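
/*
 * Illustrative sketch (a usage assumption, not original source): the
 * validation and ownership rules above are exercised through the documented
 * "thread.prof.name" mallctl, which accepts only printable/blank characters.
 * The helper name is invented for illustration.
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	int
 *	name_this_thread(const char *name)
 *	{
 *		// newp points to the (const char *) itself; jemalloc copies
 *		// the string, so the caller keeps ownership of name.
 *		return (mallctl("thread.prof.name", NULL, NULL, &name,
 *		    sizeof(name)));
 *	}
 */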

bool
prof_thread_active_get(void)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (false);
	return (tdata->active);
}

bool
prof_thread_active_set(bool active)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (true);
	tdata->active = active;
	return (false);
}
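
/*
 * Illustrative sketch (a usage assumption, not original source): per-thread
 * sampling is toggled via the documented "thread.prof.active" mallctl,
 * which lands in prof_thread_active_set() above.
 *
 *	bool active = false;
 *	// Stop attributing this thread's allocations to the profile.
 *	mallctl("thread.prof.active", NULL, NULL, &active, sizeof(active));
 */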

bool
prof_thread_active_init_get(void)
{
	bool active_init;

	malloc_mutex_lock(&prof_thread_active_init_mtx);
	active_init = prof_thread_active_init;
	malloc_mutex_unlock(&prof_thread_active_init_mtx);
	return (active_init);
}

bool
prof_thread_active_init_set(bool active_init)
{
	bool active_init_old;

	malloc_mutex_lock(&prof_thread_active_init_mtx);
	active_init_old = prof_thread_active_init;
	prof_thread_active_init = active_init;
	malloc_mutex_unlock(&prof_thread_active_init_mtx);
	return (active_init_old);
}
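
/*
 * Illustrative sketch (a usage assumption, not original source): the initial
 * per-thread active flag inherited by newly created threads is controlled by
 * the documented "prof.thread_active_init" mallctl.
 *
 *	bool init = false;
 *	// Threads created after this call start with sampling disabled.
 *	mallctl("prof.thread_active_init", NULL, NULL, &init, sizeof(init));
 */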

bool
prof_gdump_get(void)
{
	bool prof_gdump_current;

	malloc_mutex_lock(&prof_gdump_mtx);
	prof_gdump_current = prof_gdump_val;
	malloc_mutex_unlock(&prof_gdump_mtx);
	return (prof_gdump_current);
}

bool
prof_gdump_set(bool gdump)
{
	bool prof_gdump_old;

	malloc_mutex_lock(&prof_gdump_mtx);
	prof_gdump_old = prof_gdump_val;
	prof_gdump_val = gdump;
	malloc_mutex_unlock(&prof_gdump_mtx);
	return (prof_gdump_old);
}
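
/*
 * Illustrative sketch (a usage assumption, not original source): high-water-
 * mark dumping can be toggled at runtime through the documented "prof.gdump"
 * mallctl, which routes to prof_gdump_set() above.
 *
 *	bool gdump = true;
 *	// Dump a profile each time total virtual memory reaches a new peak.
 *	mallctl("prof.gdump", NULL, NULL, &gdump, sizeof(gdump));
 */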

/* Bootstrap, stage 0: install the default profile dump filename prefix. */
void
prof_boot0(void)
{

	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}

/* Bootstrap, stage 1: finalize option-derived state. */
void
prof_boot1(void)
{

	cassert(config_prof);

	/*
	 * opt_prof must be in its final state before any arenas are
	 * initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && !opt_prof) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}
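
/*
 * Illustrative note (a usage assumption, not original source): the opt_*
 * values consumed above come from the documented option system, normally
 * supplied through the MALLOC_CONF environment variable in builds configured
 * with --enable-prof, e.g.:
 *
 *	MALLOC_CONF="prof:true,lg_prof_interval:30" ./app
 *
 * which enables profiling and makes boot1 arm prof_interval, so that a
 * profile is dumped roughly every 2^30 bytes of allocation activity.
 */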

/*
 * Bootstrap, stage 2: create the bt2gctx hash, the tdata tree, and all
 * supporting mutexes; register the final dump handler if requested.
 */
bool
prof_boot2(void)
{

	cassert(config_prof);

	if (opt_prof) {
		tsd_t *tsd;
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx))
			return (true);

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx))
			return (true);

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx))
			return (true);

		tsd = tsd_fetch();
		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp))
			return (true);
		if (malloc_mutex_init(&bt2gctx_mtx))
			return (true);

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx))
			return (true);

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx))
			return (true);

		if (malloc_mutex_init(&prof_dump_seq_mtx))
			return (true);
		if (malloc_mutex_init(&prof_dump_mtx))
			return (true);

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}

		gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
		    sizeof(malloc_mutex_t));
		if (gctx_locks == NULL)
			return (true);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i]))
				return (true);
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
		    sizeof(malloc_mutex_t));
		if (tdata_locks == NULL)
			return (true);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i]))
				return (true);
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return (false);
}

/*
 * Fork handlers: acquire all profiler mutexes before fork(2), and release
 * them in the parent and child afterward.  Locks are released in the
 * reverse of the acquisition order.
 */
void
prof_prefork(void)
{

	if (opt_prof) {
		unsigned i;

		malloc_mutex_prefork(&tdatas_mtx);
		malloc_mutex_prefork(&bt2gctx_mtx);
		malloc_mutex_prefork(&next_thr_uid_mtx);
		malloc_mutex_prefork(&prof_dump_seq_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_prefork(&gctx_locks[i]);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
			malloc_mutex_prefork(&tdata_locks[i]);
	}
}

void
prof_postfork_parent(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
			malloc_mutex_postfork_parent(&tdata_locks[i]);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_parent(&gctx_locks[i]);
		malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(&next_thr_uid_mtx);
		malloc_mutex_postfork_parent(&bt2gctx_mtx);
		malloc_mutex_postfork_parent(&tdatas_mtx);
	}
}

void
prof_postfork_child(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
			malloc_mutex_postfork_child(&tdata_locks[i]);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_child(&gctx_locks[i]);
		malloc_mutex_postfork_child(&prof_dump_seq_mtx);
		malloc_mutex_postfork_child(&next_thr_uid_mtx);
		malloc_mutex_postfork_child(&bt2gctx_mtx);
		malloc_mutex_postfork_child(&tdatas_mtx);
	}
}
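
/*
 * Illustrative sketch (an assumption about the surrounding wiring, not part
 * of this translation unit): these handlers are invoked from jemalloc's
 * global fork hooks, which are registered during initialization roughly
 * like:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * where each jemalloc_* hook calls the corresponding prof_* function above
 * along with the prefork/postfork handlers of the other subsystems.
 */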

/******************************************************************************/