#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#include "jemalloc/internal/mutex.h"
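
/*
 * Add accumbytes to the interval accumulator.  Returns true when the running
 * total reaches prof_interval, in which case the counter wraps around
 * (modulo prof_interval) and the caller is expected to trigger an
 * interval-based dump (idump).
 */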
static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing.  This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
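	/*
	 * Use a lock-free CAS loop when 64-bit atomics are available, and fall
	 * back to updating the counter under prof_accum->mtx otherwise.  Both
	 * paths wrap accumbytes modulo prof_interval once it crosses the
	 * threshold.
	 */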
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
	return overflow;
}
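
/*
 * Subtract the excess accumulation, (LARGE_MINCLASS - usize) bytes, that was
 * previously added for an allocation whose usable size is actually usize,
 * presumably one that was accounted at LARGE_MINCLASS when it was sampled.
 * The subtraction clamps at zero so the counter never underflows.
 */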
static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing.  Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
	uint64_t a0, a1;
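	/*
	 * As in prof_accum_add(), prefer a lock-free CAS loop when 64-bit
	 * atomics are available; otherwise update the counter under
	 * prof_accum->mtx.
	 */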
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
		    usize) : 0;
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
	    0;
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return prof_active;
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */