#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
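/*
 * Allocate a large region of usize bytes using the default CACHELINE
 * alignment.  usize must already be a canonical size class (usize ==
 * sz_s2u(usize)).
 */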
void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == sz_s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}
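/*
 * Allocate a large region of usize bytes with at least the requested
 * alignment.  Chooses an arena if necessary, allocates a dedicated extent,
 * links it into the arena's large extent list for manual arenas, and applies
 * the junk/zero fill policy to the new region.
 */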
void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sz_sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
#if defined(__BIONIC__) && !defined(__LP64__) && !defined(JEMALLOC_JET) && !defined(JEMALLOC_INTEGRATION_TEST)
		/*
		 * On 32-bit systems, using a per-arena cache can exhaust
		 * virtual address space.  Force all huge allocations to
		 * always take place in the first arena.
		 */
		arena = arena_get(tsdn, 0, false);
#else
		arena = arena_choose(tsdn_tsd(tsdn), arena);
#endif
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* See comments in arena_bin_slabs_full_insert(). */
	if (!arena_is_auto(arena)) {
		/* Insert extent into large. */
		malloc_mutex_lock(tsdn, &arena->large_mtx);
		extent_list_append(&arena->large, extent);
		malloc_mutex_unlock(tsdn, &arena->large_mtx);
	}
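	/*
	 * Credit this allocation toward the profiling interval; trigger an
	 * interval-based profile dump if the threshold was crossed.
	 */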
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (zero) {
		assert(is_zeroed);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return extent_addr_get(extent);
}
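/*
 * Junk-fill hooks for freed large regions.  Both are JET_MUTABLE so that unit
 * tests can substitute their own implementations.
 */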
static void
large_dalloc_junk_impl(void *ptr, size_t size) {
	memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;

static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
			large_dalloc_junk(ptr, size);
		}
	}
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
    large_dalloc_maybe_junk_impl;
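/*
 * Shrink a large allocation in place by splitting off the trailing excess and
 * returning it to the arena's dirty extents.  Returns false on success, true
 * if the extent could not be split.
 */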
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL) {
		return true;
	}

	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + sz_large_pad,
		    sz_size2index(usize), false, diff, NSIZES, false);
		if (trail == NULL) {
			return true;
		}

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_size_get(trail));
		}

		arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return false;
}
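/*
 * Grow a large allocation in place by allocating a trailing extent contiguous
 * with the existing one and merging the two.  Returns false on success, true
 * if no suitable trailing extent could be obtained or merged.
 */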
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - oldusize;

	if (extent_hooks->merge == NULL) {
		return true;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed_trail ends up true when zero is
	 * false.
	 */
	bool is_zeroed_trail = zero;
	bool commit = true;
	extent_t *trail;
	bool new_mapping;
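	/*
	 * Prefer recycling a cached extent (dirty first, then muzzy) that
	 * begins exactly where the current extent ends; only map new memory
	 * if neither cache can satisfy the request.
	 */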
	if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
	    || (trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
		if (config_stats) {
			new_mapping = false;
		}
	} else {
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
		    NSIZES, &is_zeroed_trail, &commit)) == NULL) {
			return true;
		}
		if (config_stats) {
			new_mapping = true;
		}
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return true;
	}
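	/*
	 * The merged extent now backs the expanded allocation; record its new
	 * size class in both the extent and the rtree mapping.
	 */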
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_addr_get(extent), szind, false);

	if (config_stats && new_mapping) {
		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
	}

	if (zero) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		assert(is_zeroed_trail);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return false;
}
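/*
 * Try to resize the allocation in place so that its usable size falls in
 * [usize_min, usize_max].  Returns false if the extent already satisfies the
 * request or was successfully resized, true if a move is required.
 */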
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

	if (usize_max > oldusize) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min > oldusize &&
		    large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	}

	/* Attempt to shrink the allocation in-place. */
	if (oldusize > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}
	return true;
}

static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	if (alignment <= CACHELINE) {
		return large_malloc(tsdn, arena, usize, zero);
	}
	return large_palloc(tsdn, arena, usize, alignment, zero);
}
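/*
 * Reallocate a large region.  Resizes in place when possible; otherwise falls
 * back to allocating a new region, copying the data, and freeing the old one.
 */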
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
		return extent_addr_get(extent);
	}

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero);
	if (ret == NULL) {
		return NULL;
	}

	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
	return ret;
}

/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
	if (!junked_locked) {
		/* See comments in arena_bin_slabs_full_insert(). */
		if (!arena_is_auto(arena)) {
			malloc_mutex_lock(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
			malloc_mutex_unlock(tsdn, &arena->large_mtx);
		}
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	} else {
		malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
		if (!arena_is_auto(arena)) {
			extent_list_remove(&arena->large, extent);
		}
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}
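/*
 * Return the extent backing a freed large region to the arena's dirty
 * extents.
 */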
static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}