#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H

#include "jemalloc/internal/sz.h"

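/*
 * Fast-path inline helpers for heap profiling: per-thread tdata access,
 * per-object tctx bookkeeping, and the sampling decisions made on the malloc,
 * realloc, and free paths.
 */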
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return prof_gdump_val;
}

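/*
 * Return the calling thread's profiling data.  If create is true, lazily
 * initialize the tdata (only while the tsd is nominal) or reinitialize it if
 * it has expired; either path may still yield NULL if initialization fails.
 */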
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return tdata;
}

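/*
 * Read the prof_tctx_t associated with ptr.  alloc_ctx, if non-NULL, carries
 * metadata from an earlier rtree lookup so the arena code can avoid repeating
 * it.
 */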
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}

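/*
 * Associate tctx with the allocation at ptr; a thin wrapper around
 * arena_prof_tctx_set().
 */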
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}

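/*
 * Reset the profiling state of the allocation at ptr.  Used on the non-moved
 * realloc path, where it is slightly cheaper than prof_tctx_set() (see
 * prof_realloc()).
 */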
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset(tsdn, ptr, tctx);
}

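/*
 * Consume usize bytes from the calling thread's bytes_until_sample counter
 * (when update is true) and decide whether this allocation should be sampled.
 * Returns true when the allocation should NOT be sampled: no usable tdata, the
 * sample threshold has not been crossed, the thread is in a reentrant state,
 * or sampling is inactive for the tdata.  Returns false when a backtrace
 * should be captured.
 */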
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, true);
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL)) {
		return true;
	}

	if (likely(tdata->bytes_until_sample >= usize)) {
		if (update) {
			tdata->bytes_until_sample -= usize;
		}
		return true;
	} else {
		if (tsd_reentrancy_level_get(tsd) > 0) {
			return true;
		}
		/* Compute new sample threshold. */
		if (update) {
			prof_sample_threshold_update(tdata);
		}
		return !tdata->active;
	}
}

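/*
 * Decide how to tag an allocation of size usize (which must already be a
 * canonical size, i.e. usize == sz_s2u(usize)).  Returns the sentinel
 * (prof_tctx_t *)1U for unsampled allocations, or a tctx obtained via
 * backtrace lookup for sampled ones.
 */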
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == sz_s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return ret;
}

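/*
 * Record a newly allocated object: sampled allocations (tctx > 1U) are
 * registered via prof_malloc_sample_object(); all others are simply tagged
 * with the unsampled sentinel.
 */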
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
	} else {
		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
}

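/*
 * Carry profiling state across a reallocation from old_ptr/old_usize to
 * ptr/usize.  This may roll back a sample decision that was made with an
 * over-estimated usize, registers the new object if it is sampled, and
 * releases the old sampled object last (see the ordering comment below).
 */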
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
	} else if (moved) {
		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
		    (prof_tctx_t *)(uintptr_t)1U);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_set() would work for the !moved case as well, but
		 * prof_tctx_reset() is slightly cheaper, and the proper thing
		 * to do here in the presence of explicit knowledge re: moved
		 * state.
		 */
		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
	} else {
		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
		    (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the
	 * tctx to be prematurely destroyed as a side effect of momentarily
	 * zeroed counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, old_usize, old_tctx);
	}
}

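/*
 * Profiling hook for deallocation: if the object being freed was sampled,
 * release its accounting via prof_free_sampled_object().
 */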
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, usize, tctx);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */