#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"

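/*
 * Return the profiling context associated with ptr.  Small (slab-backed)
 * allocations all share the sentinel value 1U; large allocations store a real
 * tctx in their extent.
 */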
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check. */
	if (alloc_ctx == NULL) {
		const extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			return large_prof_tctx_get(tsdn, extent);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
		}
	}
	return (prof_tctx_t *)(uintptr_t)1U;
}

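/*
 * Associate tctx with ptr.  Only large allocations carry a stored tctx; for
 * slab-backed (small) allocations this is a no-op.
 */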
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check. */
	if (alloc_ctx == NULL) {
		extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			large_prof_tctx_set(tsdn, extent, tctx);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
		}
	}
}

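/* Reset the profiling context of a (necessarily large) allocation. */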
static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	assert(!extent_slab_get(extent));

	large_prof_tctx_reset(tsdn, extent);
}

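/*
 * Advance the arena's decay ticker by nticks; if the ticker fires, trigger
 * arena_decay().
 */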
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn))) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
	if (unlikely(decay_ticker == NULL)) {
		return;
	}
	if (unlikely(ticker_ticks(decay_ticker, nticks))) {
		arena_decay(tsdn, arena, false, false);
	}
}

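/* Single-tick convenience wrapper around arena_decay_ticks(). */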
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);

	arena_decay_ticks(tsdn, arena, 1);
}

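/*
 * Allocate size bytes from arena.  Small and tcache-eligible large requests
 * are served from tcache when one is available; everything else falls back to
 * arena_malloc_hard().
 */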
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);

	if (likely(tcache != NULL)) {
		if (likely(size <= SMALL_MAXCLASS)) {
			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		if (likely(size <= tcache_maxclass)) {
			return tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return arena_malloc_hard(tsdn, arena, size, ind, zero);
}

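/* Return the arena that owns the extent containing ptr. */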
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
	return extent_arena_get(iealloc(tsdn, ptr));
}

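/*
 * Return the usable size of the allocation at ptr, which must be managed by
 * jemalloc.  See arena_vsalloc() below for the variant that tolerates
 * arbitrary pointers.
 */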
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	assert(szind != NSIZES);

	return sz_index2size(szind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
	/*
	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
	 * function has two extra costs relative to isalloc():
	 * - The rtree calls cannot claim to be dependent lookups, which induces
	 *   rtree lookup load dependencies.
	 * - The lookup may fail, so there is an extra branch to check for
	 *   failure.
	 */

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent;
	szind_t szind;
	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, false, &extent, &szind)) {
		return 0;
	}

	if (extent == NULL) {
		return 0;
	}
	assert(extent_state_get(extent) == extent_state_active);
	/* Only slab members should be looked up via interior pointers. */
	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));

	assert(szind != NSIZES);

	return sz_index2size(szind);
}

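/* Deallocate ptr without going through a tcache. */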
static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind;
	bool slab;
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &slab);

	if (config_debug) {
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

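/*
 * Deallocate ptr, preferring the tcache when one is available and reusing the
 * caller-provided alloc_ctx (szind/slab) to avoid a redundant rtree lookup.
 */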
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (unlikely(tcache == NULL)) {
		arena_dalloc_no_tcache(tsdn, ptr);
		return;
	}

	szind_t szind;
	bool slab;
	rtree_ctx_t *rtree_ctx;
	if (alloc_ctx != NULL) {
		szind = alloc_ctx->szind;
		slab = alloc_ctx->slab;
		assert(szind != NSIZES);
	} else {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
	}

	if (config_debug) {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
				    szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}

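/* Sized deallocation without a tcache. */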
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
	assert(ptr != NULL);
	assert(size <= LARGE_MAXCLASS);

	szind_t szind;
	bool slab;
	if (!config_prof || !opt_prof) {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < NBINS);
	}

	if ((config_prof && opt_prof) || config_debug) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);

		assert(szind == sz_size2index(size));
		assert((config_prof && opt_prof) || slab == (szind < NBINS));

		if (config_debug) {
			extent_t *extent = rtree_extent_read(tsdn,
			    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
			assert(szind == extent_szind_get(extent));
			assert(slab == extent_slab_get(extent));
		}
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

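/*
 * Sized deallocation.  When profiling is active the size alone cannot be
 * trusted (sampled objects may have been promoted to a larger size class), so
 * szind/slab are taken from alloc_ctx or re-read from the rtree.
 */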
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
	assert(size <= LARGE_MAXCLASS);

	if (unlikely(tcache == NULL)) {
		arena_sdalloc_no_tcache(tsdn, ptr, size);
		return;
	}

	szind_t szind;
	bool slab;
	UNUSED alloc_ctx_t local_ctx;
	if (config_prof && opt_prof) {
		if (alloc_ctx == NULL) {
			/* Uncommon case and should be a static check. */
			rtree_ctx_t rtree_ctx_fallback;
			rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
			    &rtree_ctx_fallback);
			rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)ptr, true, &local_ctx.szind,
			    &local_ctx.slab);
			assert(local_ctx.szind == sz_size2index(size));
			alloc_ctx = &local_ctx;
		}
		slab = alloc_ctx->slab;
		szind = alloc_ctx->szind;
	} else {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < NBINS);
	}

	if (config_debug) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
		extent_t *extent = rtree_extent_read(tsdn,
		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn),
				    tcache, ptr, szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */