1 #define JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/div.h"
7 #include "jemalloc/internal/extent_dss.h"
8 #include "jemalloc/internal/extent_mmap.h"
9 #include "jemalloc/internal/mutex.h"
10 #include "jemalloc/internal/rtree.h"
11 #include "jemalloc/internal/size_classes.h"
12 #include "jemalloc/internal/util.h"
13
14 /******************************************************************************/
15 /* Data. */
16
17 /*
18  * Define names for both uninitialized and initialized phases, so that
19 * options and mallctl processing are straightforward.
20 */
21 const char *percpu_arena_mode_names[] = {
22 "percpu",
23 "phycpu",
24 "disabled",
25 "percpu",
26 "phycpu"
27 };
28 percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
29
30 ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
31 ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
32
33 static atomic_zd_t dirty_decay_ms_default;
34 static atomic_zd_t muzzy_decay_ms_default;
35
36 const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
37 #define STEP(step, h, x, y) \
38 h,
39 SMOOTHSTEP
40 #undef STEP
41 };
42
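/*
 * Precomputed division state, one entry per small size class; expected to be
 * populated at boot via div_init() (see div.h) so that arena_slab_regind()
 * can replace the per-free division by reg_size with a multiply-and-shift.
 */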
43 static div_info_t arena_binind_div_info[NBINS];
44
45 /******************************************************************************/
46 /*
47 * Function prototypes for static functions that are referenced prior to
48 * definition.
49 */
50
51 static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
52 arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
53 size_t npages_decay_max, bool is_background_thread);
54 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
55 bool is_background_thread, bool all);
56 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
57 bin_t *bin);
58 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
59 bin_t *bin);
60
61 /******************************************************************************/
62
63 void
64 arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
65 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
66 size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
67 *nthreads += arena_nthreads_get(arena, false);
68 *dss = dss_prec_names[arena_dss_prec_get(arena)];
69 *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
70 *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
71 *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
72 *ndirty += extents_npages_get(&arena->extents_dirty);
73 *nmuzzy += extents_npages_get(&arena->extents_muzzy);
74 }
75
76 void
77 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
78 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
79 size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
80 bin_stats_t *bstats, arena_stats_large_t *lstats) {
81 cassert(config_stats);
82
83 arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
84 muzzy_decay_ms, nactive, ndirty, nmuzzy);
85
86 size_t base_allocated, base_resident, base_mapped, metadata_thp;
87 base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
88 &base_mapped, &metadata_thp);
89
90 arena_stats_lock(tsdn, &arena->stats);
91
92 arena_stats_accum_zu(&astats->mapped, base_mapped
93 + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
94 arena_stats_accum_zu(&astats->retained,
95 extents_npages_get(&arena->extents_retained) << LG_PAGE);
96
97 arena_stats_accum_u64(&astats->decay_dirty.npurge,
98 arena_stats_read_u64(tsdn, &arena->stats,
99 &arena->stats.decay_dirty.npurge));
100 arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
101 arena_stats_read_u64(tsdn, &arena->stats,
102 &arena->stats.decay_dirty.nmadvise));
103 arena_stats_accum_u64(&astats->decay_dirty.purged,
104 arena_stats_read_u64(tsdn, &arena->stats,
105 &arena->stats.decay_dirty.purged));
106
107 arena_stats_accum_u64(&astats->decay_muzzy.npurge,
108 arena_stats_read_u64(tsdn, &arena->stats,
109 &arena->stats.decay_muzzy.npurge));
110 arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
111 arena_stats_read_u64(tsdn, &arena->stats,
112 &arena->stats.decay_muzzy.nmadvise));
113 arena_stats_accum_u64(&astats->decay_muzzy.purged,
114 arena_stats_read_u64(tsdn, &arena->stats,
115 &arena->stats.decay_muzzy.purged));
116
117 arena_stats_accum_zu(&astats->base, base_allocated);
118 arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
119 arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
120 arena_stats_accum_zu(&astats->resident, base_resident +
121 (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
122 extents_npages_get(&arena->extents_dirty) +
123 extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
124
125 for (szind_t i = 0; i < NSIZES - NBINS; i++) {
126 uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
127 &arena->stats.lstats[i].nmalloc);
128 arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
129 arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
130
131 uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
132 &arena->stats.lstats[i].ndalloc);
133 arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
134 arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
135
136 uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
137 &arena->stats.lstats[i].nrequests);
138 arena_stats_accum_u64(&lstats[i].nrequests,
139 nmalloc + nrequests);
140 arena_stats_accum_u64(&astats->nrequests_large,
141 nmalloc + nrequests);
142
143 assert(nmalloc >= ndalloc);
144 assert(nmalloc - ndalloc <= SIZE_T_MAX);
145 size_t curlextents = (size_t)(nmalloc - ndalloc);
146 lstats[i].curlextents += curlextents;
147 arena_stats_accum_zu(&astats->allocated_large,
148 curlextents * sz_index2size(NBINS + i));
149 }
150
151 arena_stats_unlock(tsdn, &arena->stats);
152
153 /* tcache_bytes counts currently cached bytes. */
154 atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
155 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
156 cache_bin_array_descriptor_t *descriptor;
157 ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
158 szind_t i = 0;
159 for (; i < NBINS; i++) {
160 cache_bin_t *tbin = &descriptor->bins_small[i];
161 arena_stats_accum_zu(&astats->tcache_bytes,
162 tbin->ncached * sz_index2size(i));
163 }
164 for (; i < nhbins; i++) {
165 cache_bin_t *tbin = &descriptor->bins_large[i];
166 arena_stats_accum_zu(&astats->tcache_bytes,
167 tbin->ncached * sz_index2size(i));
168 }
169 }
170 malloc_mutex_prof_read(tsdn,
171 &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
172 &arena->tcache_ql_mtx);
173 malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
174
175 #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
176 malloc_mutex_lock(tsdn, &arena->mtx); \
177 malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
178 &arena->mtx); \
179 malloc_mutex_unlock(tsdn, &arena->mtx);
180
181 /* Gather per arena mutex profiling data. */
182 READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
183 READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
184 arena_prof_mutex_extent_avail)
185 READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
186 arena_prof_mutex_extents_dirty)
187 READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
188 arena_prof_mutex_extents_muzzy)
189 READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
190 arena_prof_mutex_extents_retained)
191 READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
192 arena_prof_mutex_decay_dirty)
193 READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
194 arena_prof_mutex_decay_muzzy)
195 READ_ARENA_MUTEX_PROF_DATA(base->mtx,
196 arena_prof_mutex_base)
197 #undef READ_ARENA_MUTEX_PROF_DATA
198
199 nstime_copy(&astats->uptime, &arena->create_time);
200 nstime_update(&astats->uptime);
201 nstime_subtract(&astats->uptime, &arena->create_time);
202
203 for (szind_t i = 0; i < NBINS; i++) {
204 bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
205 }
206 }
207
208 void
209 arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
210 extent_hooks_t **r_extent_hooks, extent_t *extent) {
211 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
212 WITNESS_RANK_CORE, 0);
213
214 extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
215 extent);
216 if (arena_dirty_decay_ms_get(arena) == 0) {
217 arena_decay_dirty(tsdn, arena, false, true);
218 } else {
219 arena_background_thread_inactivity_check(tsdn, arena, false);
220 }
221 }
222
223 static void *
224 arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
225 void *ret;
226 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
227 size_t regind;
228
229 assert(extent_nfree_get(slab) > 0);
230 assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
231
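/*
 * bitmap_sfu() ("set first unset") claims the lowest clear bit and returns
 * its index, so regions are handed out lowest address first.
 */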
232 regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
233 ret = (void *)((uintptr_t)extent_addr_get(slab) +
234 (uintptr_t)(bin_info->reg_size * regind));
235 extent_nfree_dec(slab);
236 return ret;
237 }
238
239 #ifndef JEMALLOC_JET
240 static
241 #endif
242 size_t
243 arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
244 size_t diff, regind;
245
246 /* Freeing a pointer outside the slab can cause assertion failure. */
247 assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
248 assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
249 /* Freeing an interior pointer can cause assertion failure. */
250 assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
251 (uintptr_t)bin_infos[binind].reg_size == 0);
252
253 diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
254
255 /* Avoid doing division with a variable divisor. */
256 regind = div_compute(&arena_binind_div_info[binind], diff);
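/*
 * Illustrative arithmetic (sizes assumed): for a bin with reg_size == 160 and
 * diff == 480, div_compute() yields regind == 3, matching 480 / 160.
 */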
257
258 assert(regind < bin_infos[binind].nregs);
259
260 return regind;
261 }
262
263 static void
264 arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
265 szind_t binind = extent_szind_get(slab);
266 const bin_info_t *bin_info = &bin_infos[binind];
267 size_t regind = arena_slab_regind(slab, binind, ptr);
268
269 assert(extent_nfree_get(slab) < bin_info->nregs);
270 /* Freeing an unallocated pointer can cause assertion failure. */
271 assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
272
273 bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
274 extent_nfree_inc(slab);
275 }
276
277 static void
278 arena_nactive_add(arena_t *arena, size_t add_pages) {
279 atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
280 }
281
282 static void
283 arena_nactive_sub(arena_t *arena, size_t sub_pages) {
284 assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
285 atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
286 }
287
288 static void
289 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
290 szind_t index, hindex;
291
292 cassert(config_stats);
293
294 if (usize < LARGE_MINCLASS) {
295 usize = LARGE_MINCLASS;
296 }
297 index = sz_size2index(usize);
298 hindex = (index >= NBINS) ? index - NBINS : 0;
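/*
 * lstats is indexed by large size class only, so the smallest large class
 * (szind NBINS) maps to lstats[0].
 */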
299
300 arena_stats_add_u64(tsdn, &arena->stats,
301 &arena->stats.lstats[hindex].nmalloc, 1);
302 }
303
304 static void
305 arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
306 szind_t index, hindex;
307
308 cassert(config_stats);
309
310 if (usize < LARGE_MINCLASS) {
311 usize = LARGE_MINCLASS;
312 }
313 index = sz_size2index(usize);
314 hindex = (index >= NBINS) ? index - NBINS : 0;
315
316 arena_stats_add_u64(tsdn, &arena->stats,
317 &arena->stats.lstats[hindex].ndalloc, 1);
318 }
319
320 static void
321 arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
322 size_t usize) {
323 arena_large_dalloc_stats_update(tsdn, arena, oldusize);
324 arena_large_malloc_stats_update(tsdn, arena, usize);
325 }
326
327 extent_t *
328 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
329 size_t alignment, bool *zero) {
330 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
331
332 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
333 WITNESS_RANK_CORE, 0);
334
335 szind_t szind = sz_size2index(usize);
336 size_t mapped_add;
337 bool commit = true;
338 extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
339 &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
340 szind, zero, &commit);
341 if (extent == NULL) {
342 extent = extents_alloc(tsdn, arena, &extent_hooks,
343 &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
344 false, szind, zero, &commit);
345 }
346 size_t size = usize + sz_large_pad;
347 if (extent == NULL) {
348 extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
349 usize, sz_large_pad, alignment, false, szind, zero,
350 &commit);
351 if (config_stats) {
352 /*
353 * extent may be NULL on OOM, but in that case
354 * mapped_add isn't used below, so there's no need to
355 * conditionally set it to 0 here.
356 */
357 mapped_add = size;
358 }
359 } else if (config_stats) {
360 mapped_add = 0;
361 }
362
363 if (extent != NULL) {
364 if (config_stats) {
365 arena_stats_lock(tsdn, &arena->stats);
366 arena_large_malloc_stats_update(tsdn, arena, usize);
367 if (mapped_add != 0) {
368 arena_stats_add_zu(tsdn, &arena->stats,
369 &arena->stats.mapped, mapped_add);
370 }
371 arena_stats_unlock(tsdn, &arena->stats);
372 }
373 arena_nactive_add(arena, size >> LG_PAGE);
374 }
375
376 return extent;
377 }
378
379 void
380 arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
381 if (config_stats) {
382 arena_stats_lock(tsdn, &arena->stats);
383 arena_large_dalloc_stats_update(tsdn, arena,
384 extent_usize_get(extent));
385 arena_stats_unlock(tsdn, &arena->stats);
386 }
387 arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
388 }
389
390 void
391 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
392 size_t oldusize) {
393 size_t usize = extent_usize_get(extent);
394 size_t udiff = oldusize - usize;
395
396 if (config_stats) {
397 arena_stats_lock(tsdn, &arena->stats);
398 arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
399 arena_stats_unlock(tsdn, &arena->stats);
400 }
401 arena_nactive_sub(arena, udiff >> LG_PAGE);
402 }
403
404 void
405 arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
406 size_t oldusize) {
407 size_t usize = extent_usize_get(extent);
408 size_t udiff = usize - oldusize;
409
410 if (config_stats) {
411 arena_stats_lock(tsdn, &arena->stats);
412 arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
413 arena_stats_unlock(tsdn, &arena->stats);
414 }
415 arena_nactive_add(arena, udiff >> LG_PAGE);
416 }
417
418 static ssize_t
419 arena_decay_ms_read(arena_decay_t *decay) {
420 return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
421 }
422
423 static void
424 arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
425 atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
426 }
427
428 static void
429 arena_decay_deadline_init(arena_decay_t *decay) {
430 /*
431 * Generate a new deadline that is uniformly random within the next
432 * epoch after the current one.
433 */
434 nstime_copy(&decay->deadline, &decay->epoch);
435 nstime_add(&decay->deadline, &decay->interval);
436 if (arena_decay_ms_read(decay) > 0) {
437 nstime_t jitter;
438
439 nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
440 nstime_ns(&decay->interval)));
441 nstime_add(&decay->deadline, &jitter);
442 }
443 }
444
445 static bool
446 arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
447 return (nstime_compare(&decay->deadline, time) <= 0);
448 }
449
450 static size_t
451 arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
452 uint64_t sum;
453 size_t npages_limit_backlog;
454 unsigned i;
455
456 /*
457 * For each element of decay_backlog, multiply by the corresponding
458 * fixed-point smoothstep decay factor. Sum the products, then divide
459 * to round down to the nearest whole number of pages.
460 */
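/*
 * Illustrative arithmetic (values assumed, with SMOOTHSTEP_BFP == 24): a
 * backlog entry of 512 pages weighted by h == 0x00800000 (0.5 in fixed point)
 * contributes (512 * 0x00800000) >> 24 == 256 pages to the limit.
 */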
461 sum = 0;
462 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
463 sum += decay->backlog[i] * h_steps[i];
464 }
465 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
466
467 return npages_limit_backlog;
468 }
469
470 static void
471 arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
472 size_t npages_delta = (current_npages > decay->nunpurged) ?
473 current_npages - decay->nunpurged : 0;
474 decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
475
476 if (config_debug) {
477 if (current_npages > decay->ceil_npages) {
478 decay->ceil_npages = current_npages;
479 }
480 size_t npages_limit = arena_decay_backlog_npages_limit(decay);
481 assert(decay->ceil_npages >= npages_limit);
482 if (decay->ceil_npages > npages_limit) {
483 decay->ceil_npages = npages_limit;
484 }
485 }
486 }
487
488 static void
489 arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
490 size_t current_npages) {
491 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
492 memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
493 sizeof(size_t));
494 } else {
495 size_t nadvance_z = (size_t)nadvance_u64;
496
497 assert((uint64_t)nadvance_z == nadvance_u64);
498
499 memmove(decay->backlog, &decay->backlog[nadvance_z],
500 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
501 if (nadvance_z > 1) {
502 memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
503 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
504 }
505 }
506
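/*
 * Example: advancing by 2 epochs discards backlog[0..1], shifts the remaining
 * entries toward index 0, zeroes one newly exposed slot, and leaves the final
 * slot to be set by arena_decay_backlog_update_last() below.
 */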
507 arena_decay_backlog_update_last(decay, current_npages);
508 }
509
510 static void
511 arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
512 extents_t *extents, size_t current_npages, size_t npages_limit,
513 bool is_background_thread) {
514 if (current_npages > npages_limit) {
515 arena_decay_to_limit(tsdn, arena, decay, extents, false,
516 npages_limit, current_npages - npages_limit,
517 is_background_thread);
518 }
519 }
520
521 static void
522 arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
523 size_t current_npages) {
524 assert(arena_decay_deadline_reached(decay, time));
525
526 nstime_t delta;
527 nstime_copy(&delta, time);
528 nstime_subtract(&delta, &decay->epoch);
529
530 uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
531 assert(nadvance_u64 > 0);
532
533 /* Add nadvance_u64 decay intervals to epoch. */
534 nstime_copy(&delta, &decay->interval);
535 nstime_imultiply(&delta, nadvance_u64);
536 nstime_add(&decay->epoch, &delta);
537
538 /* Set a new deadline. */
539 arena_decay_deadline_init(decay);
540
541 /* Update the backlog. */
542 arena_decay_backlog_update(decay, nadvance_u64, current_npages);
543 }
544
545 static void
546 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
547 extents_t *extents, const nstime_t *time, bool is_background_thread) {
548 size_t current_npages = extents_npages_get(extents);
549 arena_decay_epoch_advance_helper(decay, time, current_npages);
550
551 size_t npages_limit = arena_decay_backlog_npages_limit(decay);
552 /* arena_decay_try_purge() may drop decay->mtx; update decay state first. */
553 decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
554 current_npages;
555
556 if (!background_thread_enabled() || is_background_thread) {
557 arena_decay_try_purge(tsdn, arena, decay, extents,
558 current_npages, npages_limit, is_background_thread);
559 }
560 }
561
562 static void
563 arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
564 arena_decay_ms_write(decay, decay_ms);
565 if (decay_ms > 0) {
566 nstime_init(&decay->interval, (uint64_t)decay_ms *
567 KQU(1000000));
568 nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
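/*
 * E.g., a 10000 ms setting with SMOOTHSTEP_NSTEPS == 200 (values assumed
 * here) yields 50 ms epochs: 10000 * 1000000 ns / 200.
 */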
569 }
570
571 nstime_init(&decay->epoch, 0);
572 nstime_update(&decay->epoch);
573 decay->jitter_state = (uint64_t)(uintptr_t)decay;
574 arena_decay_deadline_init(decay);
575 decay->nunpurged = 0;
576 memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
577 }
578
579 static bool
580 arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
581 arena_stats_decay_t *stats) {
582 if (config_debug) {
583 for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
584 assert(((char *)decay)[i] == 0);
585 }
586 decay->ceil_npages = 0;
587 }
588 if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
589 malloc_mutex_rank_exclusive)) {
590 return true;
591 }
592 decay->purging = false;
593 arena_decay_reinit(decay, decay_ms);
594 /* Memory is zeroed, so there is no need to clear stats. */
595 if (config_stats) {
596 decay->stats = stats;
597 }
598 return false;
599 }
600
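/*
 * Accepted settings: -1 (never decay), 0 (purge immediately), or a positive
 * millisecond value small enough that the conversion to nanoseconds cannot
 * overflow nstime arithmetic.
 */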
601 static bool
602 arena_decay_ms_valid(ssize_t decay_ms) {
603 if (decay_ms < -1) {
604 return false;
605 }
606 if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
607 KQU(1000)) {
608 return true;
609 }
610 return false;
611 }
612
613 static bool
614 arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
615 extents_t *extents, bool is_background_thread) {
616 malloc_mutex_assert_owner(tsdn, &decay->mtx);
617
618 /* Purge all or nothing if the option is disabled. */
619 ssize_t decay_ms = arena_decay_ms_read(decay);
620 if (decay_ms <= 0) {
621 if (decay_ms == 0) {
622 arena_decay_to_limit(tsdn, arena, decay, extents, false,
623 0, extents_npages_get(extents),
624 is_background_thread);
625 }
626 return false;
627 }
628
629 nstime_t time;
630 nstime_init(&time, 0);
631 nstime_update(&time);
632 if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
633 > 0)) {
634 /*
635 * Time went backwards. Move the epoch back in time and
636 * generate a new deadline, with the expectation that time
637 * typically flows forward for long enough periods of time that
638 * epochs complete. Unfortunately, this strategy is susceptible
639 * to clock jitter triggering premature epoch advances, but
640 * clock jitter estimation and compensation isn't feasible here
641 * because calls into this code are event-driven.
642 */
643 nstime_copy(&decay->epoch, &time);
644 arena_decay_deadline_init(decay);
645 } else {
646 /* Verify that time does not go backwards. */
647 assert(nstime_compare(&decay->epoch, &time) <= 0);
648 }
649
650 /*
651 * If the deadline has been reached, advance to the current epoch and
652 * purge to the new limit if necessary. Note that dirty pages created
653 * during the current epoch are not subject to purge until a future
654 * epoch; as a result, purging only happens during epoch advances or when
655 * triggered by background threads (as a scheduled event).
656 */
657 bool advance_epoch = arena_decay_deadline_reached(decay, &time);
658 if (advance_epoch) {
659 arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
660 is_background_thread);
661 } else if (is_background_thread) {
662 arena_decay_try_purge(tsdn, arena, decay, extents,
663 extents_npages_get(extents),
664 arena_decay_backlog_npages_limit(decay),
665 is_background_thread);
666 }
667
668 return advance_epoch;
669 }
670
671 static ssize_t
672 arena_decay_ms_get(arena_decay_t *decay) {
673 return arena_decay_ms_read(decay);
674 }
675
676 ssize_t
677 arena_dirty_decay_ms_get(arena_t *arena) {
678 return arena_decay_ms_get(&arena->decay_dirty);
679 }
680
681 ssize_t
682 arena_muzzy_decay_ms_get(arena_t *arena) {
683 return arena_decay_ms_get(&arena->decay_muzzy);
684 }
685
686 static bool
687 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
688 extents_t *extents, ssize_t decay_ms) {
689 if (!arena_decay_ms_valid(decay_ms)) {
690 return true;
691 }
692
693 malloc_mutex_lock(tsdn, &decay->mtx);
694 /*
695 * Restart decay backlog from scratch, which may cause many dirty pages
696 * to be immediately purged. It would conceptually be possible to map
697 * the old backlog onto the new backlog, but there is no justification
698 * for such complexity since decay_ms changes are intended to be
699 * infrequent, either between the {-1, 0, >0} states, or a one-time
700 * arbitrary change during initial arena configuration.
701 */
702 arena_decay_reinit(decay, decay_ms);
703 arena_maybe_decay(tsdn, arena, decay, extents, false);
704 malloc_mutex_unlock(tsdn, &decay->mtx);
705
706 return false;
707 }
708
709 bool
710 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
711 ssize_t decay_ms) {
712 return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
713 &arena->extents_dirty, decay_ms);
714 }
715
716 bool
717 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
718 ssize_t decay_ms) {
719 return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
720 &arena->extents_muzzy, decay_ms);
721 }
722
723 static size_t
724 arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
725 extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
726 size_t npages_decay_max, extent_list_t *decay_extents) {
727 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
728 WITNESS_RANK_CORE, 0);
729
730 /* Stash extents according to npages_limit. */
731 size_t nstashed = 0;
732 extent_t *extent;
733 while (nstashed < npages_decay_max &&
734 (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
735 npages_limit)) != NULL) {
736 extent_list_append(decay_extents, extent);
737 nstashed += extent_size_get(extent) >> LG_PAGE;
738 }
739 return nstashed;
740 }
741
742 static size_t
743 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
744 extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
745 bool all, extent_list_t *decay_extents, bool is_background_thread) {
746 UNUSED size_t nmadvise, nunmapped;
747 size_t npurged;
748
749 if (config_stats) {
750 nmadvise = 0;
751 nunmapped = 0;
752 }
753 npurged = 0;
754
755 ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
756 for (extent_t *extent = extent_list_first(decay_extents); extent !=
757 NULL; extent = extent_list_first(decay_extents)) {
758 if (config_stats) {
759 nmadvise++;
760 }
761 size_t npages = extent_size_get(extent) >> LG_PAGE;
762 npurged += npages;
763 extent_list_remove(decay_extents, extent);
764 switch (extents_state_get(extents)) {
765 case extent_state_active:
766 not_reached();
767 case extent_state_dirty:
768 if (!all && muzzy_decay_ms != 0 &&
769 !extent_purge_lazy_wrapper(tsdn, arena,
770 r_extent_hooks, extent, 0,
771 extent_size_get(extent))) {
772 extents_dalloc(tsdn, arena, r_extent_hooks,
773 &arena->extents_muzzy, extent);
774 arena_background_thread_inactivity_check(tsdn,
775 arena, is_background_thread);
776 break;
777 }
778 /* Fall through. */
779 case extent_state_muzzy:
780 extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
781 extent);
782 if (config_stats) {
783 nunmapped += npages;
784 }
785 break;
786 case extent_state_retained:
787 default:
788 not_reached();
789 }
790 }
791
792 if (config_stats) {
793 arena_stats_lock(tsdn, &arena->stats);
794 arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
795 1);
796 arena_stats_add_u64(tsdn, &arena->stats,
797 &decay->stats->nmadvise, nmadvise);
798 arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
799 npurged);
800 arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
801 nunmapped << LG_PAGE);
802 arena_stats_unlock(tsdn, &arena->stats);
803 }
804
805 return npurged;
806 }
807
808 /*
809 * npages_limit: Decay at most npages_decay_max pages without violating the
810 * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
811 * bound on number of pages in order to prevent unbounded growth (namely in
812 * stashed), otherwise unbounded new pages could be added to extents during the
813 * current decay run, so that the purging thread never finishes.
814 */
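/*
 * Example (numbers assumed): with 96 pages in extents, npages_limit == 64 and
 * npages_decay_max == 16, roughly 16 pages are stashed and purged even though
 * 32 pages sit above the limit; the remainder is left for later decay runs.
 */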
815 static void
816 arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
817 extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
818 bool is_background_thread) {
819 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
820 WITNESS_RANK_CORE, 1);
821 malloc_mutex_assert_owner(tsdn, &decay->mtx);
822
823 if (decay->purging) {
824 return;
825 }
826 decay->purging = true;
827 malloc_mutex_unlock(tsdn, &decay->mtx);
828
829 extent_hooks_t *extent_hooks = extent_hooks_get(arena);
830
831 extent_list_t decay_extents;
832 extent_list_init(&decay_extents);
833
834 size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
835 npages_limit, npages_decay_max, &decay_extents);
836 if (npurge != 0) {
837 UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
838 &extent_hooks, decay, extents, all, &decay_extents,
839 is_background_thread);
840 assert(npurged == npurge);
841 }
842
843 malloc_mutex_lock(tsdn, &decay->mtx);
844 decay->purging = false;
845 }
846
847 static bool
848 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
849 extents_t *extents, bool is_background_thread, bool all) {
850 if (all) {
851 malloc_mutex_lock(tsdn, &decay->mtx);
852 arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
853 extents_npages_get(extents), is_background_thread);
854 malloc_mutex_unlock(tsdn, &decay->mtx);
855
856 return false;
857 }
858
859 if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
860 /* No need to wait if another thread is in progress. */
861 return true;
862 }
863
864 bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
865 is_background_thread);
866 UNUSED size_t npages_new;
867 if (epoch_advanced) {
868 /* Backlog is updated on epoch advance. */
869 npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
870 }
871 malloc_mutex_unlock(tsdn, &decay->mtx);
872
873 if (have_background_thread && background_thread_enabled() &&
874 epoch_advanced && !is_background_thread) {
875 background_thread_interval_check(tsdn, arena, decay,
876 npages_new);
877 }
878
879 return false;
880 }
881
882 static bool
883 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
884 bool all) {
885 return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
886 &arena->extents_dirty, is_background_thread, all);
887 }
888
889 static bool
890 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
891 bool all) {
892 return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
893 &arena->extents_muzzy, is_background_thread, all);
894 }
895
896 void
897 arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
898 if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
899 return;
900 }
901 arena_decay_muzzy(tsdn, arena, is_background_thread, all);
902 }
903
904 static void
905 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
906 arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
907
908 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
909 arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
910 }
911
912 static void
913 arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
914 assert(extent_nfree_get(slab) > 0);
915 extent_heap_insert(&bin->slabs_nonfull, slab);
916 }
917
918 static void
919 arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
920 extent_heap_remove(&bin->slabs_nonfull, slab);
921 }
922
923 static extent_t *
924 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
925 extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
926 if (slab == NULL) {
927 return NULL;
928 }
929 if (config_stats) {
930 bin->stats.reslabs++;
931 }
932 return slab;
933 }
934
935 static void
936 arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
937 assert(extent_nfree_get(slab) == 0);
938 /*
939 * Tracking extents is required by arena_reset, which is not allowed
940 * for auto arenas. Bypass this step to avoid touching the extent
941 * linkage (often results in cache misses) for auto arenas.
942 */
943 if (arena_is_auto(arena)) {
944 return;
945 }
946 extent_list_append(&bin->slabs_full, slab);
947 }
948
949 static void
950 arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
951 if (arena_is_auto(arena)) {
952 return;
953 }
954 extent_list_remove(&bin->slabs_full, slab);
955 }
956
957 void
958 arena_reset(tsd_t *tsd, arena_t *arena) {
959 /*
960 * Locking in this function is unintuitive. The caller guarantees that
961 * no concurrent operations are happening in this arena, but there are
962 * still reasons that some locking is necessary:
963 *
964 * - Some of the functions in the transitive closure of calls assume
965 * appropriate locks are held, and in some cases these locks are
966 * temporarily dropped to avoid lock order reversal or deadlock due to
967 * reentry.
968 * - mallctl("epoch", ...) may concurrently refresh stats. While
969 * strictly speaking this is a "concurrent operation", disallowing
970 * stats refreshes would impose an inconvenient burden.
971 */
972
973 /* Large allocations. */
974 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
975
976 for (extent_t *extent = extent_list_first(&arena->large); extent !=
977 NULL; extent = extent_list_first(&arena->large)) {
978 void *ptr = extent_base_get(extent);
979 size_t usize;
980
981 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
982 alloc_ctx_t alloc_ctx;
983 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
984 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
985 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
986 assert(alloc_ctx.szind != NSIZES);
987
988 if (config_stats || (config_prof && opt_prof)) {
989 usize = sz_index2size(alloc_ctx.szind);
990 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
991 }
992 /* Remove large allocation from prof sample set. */
993 if (config_prof && opt_prof) {
994 prof_free(tsd, ptr, usize, &alloc_ctx);
995 }
996 large_dalloc(tsd_tsdn(tsd), extent);
997 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
998 }
999 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
1000
1001 /* Bins. */
1002 for (unsigned i = 0; i < NBINS; i++) {
1003 extent_t *slab;
1004 bin_t *bin = &arena->bins[i];
1005 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1006 if (bin->slabcur != NULL) {
1007 slab = bin->slabcur;
1008 bin->slabcur = NULL;
1009 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1010 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1011 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1012 }
1013 while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
1014 NULL) {
1015 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1016 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1017 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1018 }
1019 for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
1020 slab = extent_list_first(&bin->slabs_full)) {
1021 arena_bin_slabs_full_remove(arena, bin, slab);
1022 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1023 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1024 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1025 }
1026 if (config_stats) {
1027 bin->stats.curregs = 0;
1028 bin->stats.curslabs = 0;
1029 }
1030 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1031 }
1032
1033 atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
1034 }
1035
1036 static void
1037 arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
1038 /*
1039 * Iterate over the retained extents and destroy them. This gives the
1040 * extent allocator underlying the extent hooks an opportunity to unmap
1041 * all retained memory without having to keep its own metadata
1042 * structures. In practice, virtual memory for dss-allocated extents is
1043 * leaked here, so best practice is to avoid dss for arenas to be
1044 * destroyed, or provide custom extent hooks that track retained
1045 * dss-based extents for later reuse.
1046 */
1047 extent_hooks_t *extent_hooks = extent_hooks_get(arena);
1048 extent_t *extent;
1049 while ((extent = extents_evict(tsdn, arena, &extent_hooks,
1050 &arena->extents_retained, 0)) != NULL) {
1051 extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
1052 }
1053 }
1054
1055 void
1056 arena_destroy(tsd_t *tsd, arena_t *arena) {
1057 assert(base_ind_get(arena->base) >= narenas_auto);
1058 assert(arena_nthreads_get(arena, false) == 0);
1059 assert(arena_nthreads_get(arena, true) == 0);
1060
1061 /*
1062 * No allocations have occurred since arena_reset() was called.
1063 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
1064 * extents, so only retained extents may remain.
1065 */
1066 assert(extents_npages_get(&arena->extents_dirty) == 0);
1067 assert(extents_npages_get(&arena->extents_muzzy) == 0);
1068
1069 /* Deallocate retained memory. */
1070 arena_destroy_retained(tsd_tsdn(tsd), arena);
1071
1072 /*
1073 * Remove the arena pointer from the arenas array. We rely on the fact
1074 * that there is no way for the application to get a dirty read from the
1075 * arenas array unless there is an inherent race in the application
1076 * involving access of an arena being concurrently destroyed. The
1077 * application must synchronize knowledge of the arena's validity, so as
1078 * long as we use an atomic write to update the arenas array, the
1079 * application will get a clean read any time after it synchronizes
1080 * knowledge that the arena is no longer valid.
1081 */
1082 arena_set(base_ind_get(arena->base), NULL);
1083
1084 /*
1085 * Destroy the base allocator, which manages all metadata ever mapped by
1086 * this arena.
1087 */
1088 base_delete(tsd_tsdn(tsd), arena->base);
1089 }
1090
1091 static extent_t *
1092 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
1093 extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
1094 szind_t szind) {
1095 extent_t *slab;
1096 bool zero, commit;
1097
1098 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1099 WITNESS_RANK_CORE, 0);
1100
1101 zero = false;
1102 commit = true;
1103 slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
1104 bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
1105
1106 if (config_stats && slab != NULL) {
1107 arena_stats_mapped_add(tsdn, &arena->stats,
1108 bin_info->slab_size);
1109 }
1110
1111 return slab;
1112 }
1113
1114 static extent_t *
1115 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
1116 const bin_info_t *bin_info) {
1117 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1118 WITNESS_RANK_CORE, 0);
1119
1120 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1121 szind_t szind = sz_size2index(bin_info->reg_size);
1122 bool zero = false;
1123 bool commit = true;
1124 extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
1125 &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
1126 binind, &zero, &commit);
1127 if (slab == NULL) {
1128 slab = extents_alloc(tsdn, arena, &extent_hooks,
1129 &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
1130 true, binind, &zero, &commit);
1131 }
1132 if (slab == NULL) {
1133 slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
1134 bin_info, szind);
1135 if (slab == NULL) {
1136 return NULL;
1137 }
1138 }
1139 assert(extent_slab_get(slab));
1140
1141 /* Initialize slab internals. */
1142 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1143 extent_nfree_set(slab, bin_info->nregs);
1144 bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
1145
1146 arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
1147
1148 return slab;
1149 }
1150
1151 static extent_t *
1152 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1153 szind_t binind) {
1154 extent_t *slab;
1155 const bin_info_t *bin_info;
1156
1157 /* Look for a usable slab. */
1158 slab = arena_bin_slabs_nonfull_tryget(bin);
1159 if (slab != NULL) {
1160 return slab;
1161 }
1162 /* No existing slabs have any space available. */
1163
1164 bin_info = &bin_infos[binind];
1165
1166 /* Allocate a new slab. */
1167 malloc_mutex_unlock(tsdn, &bin->lock);
1168 /******************************/
1169 slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
1170 /********************************/
1171 malloc_mutex_lock(tsdn, &bin->lock);
1172 if (slab != NULL) {
1173 if (config_stats) {
1174 bin->stats.nslabs++;
1175 bin->stats.curslabs++;
1176 }
1177 return slab;
1178 }
1179
1180 /*
1181 * arena_slab_alloc() failed, but another thread may have made
1182 * sufficient memory available while this one dropped bin->lock above,
1183 * so search one more time.
1184 */
1185 slab = arena_bin_slabs_nonfull_tryget(bin);
1186 if (slab != NULL) {
1187 return slab;
1188 }
1189
1190 return NULL;
1191 }
1192
1193 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
1194 static void *
1195 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1196 szind_t binind) {
1197 const bin_info_t *bin_info;
1198 extent_t *slab;
1199
1200 bin_info = &bin_infos[binind];
1201 if (!arena_is_auto(arena) && bin->slabcur != NULL) {
1202 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1203 bin->slabcur = NULL;
1204 }
1205 slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
1206 if (bin->slabcur != NULL) {
1207 /*
1208 * Another thread updated slabcur while this one ran without the
1209 * bin lock in arena_bin_nonfull_slab_get().
1210 */
1211 if (extent_nfree_get(bin->slabcur) > 0) {
1212 void *ret = arena_slab_reg_alloc(bin->slabcur,
1213 bin_info);
1214 if (slab != NULL) {
1215 /*
1216 * arena_slab_alloc() may have allocated slab,
1217 * or it may have been pulled from
1218 * slabs_nonfull. Therefore it is unsafe to
1219 * make any assumptions about how slab has
1220 * previously been used, and
1221 * arena_bin_lower_slab() must be called, as if
1222 * a region were just deallocated from the slab.
1223 */
1224 if (extent_nfree_get(slab) == bin_info->nregs) {
1225 arena_dalloc_bin_slab(tsdn, arena, slab,
1226 bin);
1227 } else {
1228 arena_bin_lower_slab(tsdn, arena, slab,
1229 bin);
1230 }
1231 }
1232 return ret;
1233 }
1234
1235 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1236 bin->slabcur = NULL;
1237 }
1238
1239 if (slab == NULL) {
1240 return NULL;
1241 }
1242 bin->slabcur = slab;
1243
1244 assert(extent_nfree_get(bin->slabcur) > 0);
1245
1246 return arena_slab_reg_alloc(slab, bin_info);
1247 }
1248
1249 void
1250 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
1251 cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
1252 unsigned i, nfill;
1253 bin_t *bin;
1254
1255 assert(tbin->ncached == 0);
1256
1257 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
1258 prof_idump(tsdn);
1259 }
1260 bin = &arena->bins[binind];
1261 malloc_mutex_lock(tsdn, &bin->lock);
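/*
 * nfill example (values assumed): ncached_max == 8 with lg_fill_div == 1
 * yields nfill == 4, i.e. the cache bin is refilled to half capacity.
 */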
1262 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1263 tcache->lg_fill_div[binind]); i < nfill; i++) {
1264 extent_t *slab;
1265 void *ptr;
1266 if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
1267 0) {
1268 ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1269 } else {
1270 ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1271 }
1272 if (ptr == NULL) {
1273 /*
1274 * OOM. tbin->avail isn't yet filled down to its first
1275 * element, so the successful allocations (if any) must
1276 * be moved to just before tbin->avail prior to bailing out.
1277 */
1278 if (i > 0) {
1279 memmove(tbin->avail - i, tbin->avail - nfill,
1280 i * sizeof(void *));
1281 }
1282 break;
1283 }
1284 if (config_fill && unlikely(opt_junk_alloc)) {
1285 arena_alloc_junk_small(ptr, &bin_infos[binind], true);
1286 }
1287 /* Insert such that low regions get used first. */
1288 *(tbin->avail - nfill + i) = ptr;
1289 }
1290 if (config_stats) {
1291 bin->stats.nmalloc += i;
1292 #if defined(ANDROID_ENABLE_TCACHE_STATS)
1293 bin->stats.nrequests += tbin->tstats.nrequests;
1294 #endif
1295 bin->stats.curregs += i;
1296 bin->stats.nfills++;
1297 #if defined(ANDROID_ENABLE_TCACHE_STATS)
1298 tbin->tstats.nrequests = 0;
1299 #endif
1300 }
1301 malloc_mutex_unlock(tsdn, &bin->lock);
1302 tbin->ncached = i;
1303 arena_decay_tick(tsdn, arena);
1304 }
1305
1306 void
1307 arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
1308 if (!zero) {
1309 memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
1310 }
1311 }
1312
1313 static void
1314 arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
1315 memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
1316 }
1317 arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
1318 arena_dalloc_junk_small_impl;
1319
1320 static void *
1321 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
1322 void *ret;
1323 bin_t *bin;
1324 size_t usize;
1325 extent_t *slab;
1326
1327 assert(binind < NBINS);
1328 bin = &arena->bins[binind];
1329 usize = sz_index2size(binind);
1330
1331 malloc_mutex_lock(tsdn, &bin->lock);
1332 if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
1333 ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1334 } else {
1335 ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1336 }
1337
1338 if (ret == NULL) {
1339 malloc_mutex_unlock(tsdn, &bin->lock);
1340 return NULL;
1341 }
1342
1343 if (config_stats) {
1344 bin->stats.nmalloc++;
1345 bin->stats.nrequests++;
1346 bin->stats.curregs++;
1347 }
1348 malloc_mutex_unlock(tsdn, &bin->lock);
1349 if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
1350 prof_idump(tsdn);
1351 }
1352
1353 if (!zero) {
1354 if (config_fill) {
1355 if (unlikely(opt_junk_alloc)) {
1356 arena_alloc_junk_small(ret,
1357 &bin_infos[binind], false);
1358 } else if (unlikely(opt_zero)) {
1359 memset(ret, 0, usize);
1360 }
1361 }
1362 } else {
1363 if (config_fill && unlikely(opt_junk_alloc)) {
1364 arena_alloc_junk_small(ret, &bin_infos[binind],
1365 true);
1366 }
1367 memset(ret, 0, usize);
1368 }
1369
1370 arena_decay_tick(tsdn, arena);
1371 return ret;
1372 }
1373
1374 void *
1375 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
1376 bool zero) {
1377 assert(!tsdn_null(tsdn) || arena != NULL);
1378
1379 if (likely(!tsdn_null(tsdn))) {
1380 arena = arena_choose(tsdn_tsd(tsdn), arena);
1381 }
1382 if (unlikely(arena == NULL)) {
1383 return NULL;
1384 }
1385
1386 if (likely(size <= SMALL_MAXCLASS)) {
1387 return arena_malloc_small(tsdn, arena, ind, zero);
1388 }
1389 return large_malloc(tsdn, arena, sz_index2size(ind), zero);
1390 }
1391
1392 void *
1393 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
1394 bool zero, tcache_t *tcache) {
1395 void *ret;
1396
1397 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
1398 && (usize & PAGE_MASK) == 0))) {
1399 /* Small; alignment doesn't require special slab placement. */
1400 ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1401 zero, tcache, true);
1402 } else {
1403 if (likely(alignment <= CACHELINE)) {
1404 ret = large_malloc(tsdn, arena, usize, zero);
1405 } else {
1406 ret = large_palloc(tsdn, arena, usize, alignment, zero);
1407 }
1408 }
1409 return ret;
1410 }
1411
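/*
 * Sampled small allocations are backed by LARGE_MINCLASS extents; promotion
 * records the true small size class in the extent and rtree so that isalloc()
 * reports usize, and arena_prof_demote() below restores LARGE_MINCLASS before
 * the extent is deallocated as a large allocation.
 */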
1412 void
1413 arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
1414 cassert(config_prof);
1415 assert(ptr != NULL);
1416 assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
1417 assert(usize <= SMALL_MAXCLASS);
1418
1419 rtree_ctx_t rtree_ctx_fallback;
1420 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1421
1422 extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1423 (uintptr_t)ptr, true);
1424 arena_t *arena = extent_arena_get(extent);
1425
1426 szind_t szind = sz_size2index(usize);
1427 extent_szind_set(extent, szind);
1428 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1429 szind, false);
1430
1431 prof_accum_cancel(tsdn, &arena->prof_accum, usize);
1432
1433 assert(isalloc(tsdn, ptr) == usize);
1434 }
1435
1436 static size_t
1437 arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
1438 cassert(config_prof);
1439 assert(ptr != NULL);
1440
1441 extent_szind_set(extent, NBINS);
1442 rtree_ctx_t rtree_ctx_fallback;
1443 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1444 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1445 NBINS, false);
1446
1447 assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
1448
1449 return LARGE_MINCLASS;
1450 }
1451
1452 void
1453 arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
1454 bool slow_path) {
1455 cassert(config_prof);
1456 assert(opt_prof);
1457
1458 extent_t *extent = iealloc(tsdn, ptr);
1459 size_t usize = arena_prof_demote(tsdn, extent, ptr);
1460 if (usize <= tcache_maxclass) {
1461 tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1462 sz_size2index(usize), slow_path);
1463 } else {
1464 large_dalloc(tsdn, extent);
1465 }
1466 }
1467
1468 static void
1469 arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
1470 /* Dissociate slab from bin. */
1471 if (slab == bin->slabcur) {
1472 bin->slabcur = NULL;
1473 } else {
1474 szind_t binind = extent_szind_get(slab);
1475 const bin_info_t *bin_info = &bin_infos[binind];
1476
1477 /*
1478 * The following block's conditional is necessary because if the
1479 * slab only contains one region, then it never gets inserted
1480 * into the non-full slabs heap.
1481 */
1482 if (bin_info->nregs == 1) {
1483 arena_bin_slabs_full_remove(arena, bin, slab);
1484 } else {
1485 arena_bin_slabs_nonfull_remove(bin, slab);
1486 }
1487 }
1488 }
1489
1490 static void
1491 arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1492 bin_t *bin) {
1493 assert(slab != bin->slabcur);
1494
1495 malloc_mutex_unlock(tsdn, &bin->lock);
1496 /******************************/
1497 arena_slab_dalloc(tsdn, arena, slab);
1498 /****************************/
1499 malloc_mutex_lock(tsdn, &bin->lock);
1500 if (config_stats) {
1501 bin->stats.curslabs--;
1502 }
1503 }
1504
1505 static void
1506 arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1507 bin_t *bin) {
1508 assert(extent_nfree_get(slab) > 0);
1509
1510 /*
1511 * Make sure that if bin->slabcur is non-NULL, it refers to the
1512 * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
1513 * than proactively keeping it pointing at the oldest/lowest non-full
1514 * slab.
1515 */
1516 if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
1517 /* Switch slabcur. */
1518 if (extent_nfree_get(bin->slabcur) > 0) {
1519 arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
1520 } else {
1521 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1522 }
1523 bin->slabcur = slab;
1524 if (config_stats) {
1525 bin->stats.reslabs++;
1526 }
1527 } else {
1528 arena_bin_slabs_nonfull_insert(bin, slab);
1529 }
1530 }
1531
1532 static void
1533 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1534 void *ptr, bool junked) {
1535 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1536 szind_t binind = extent_szind_get(slab);
1537 bin_t *bin = &arena->bins[binind];
1538 const bin_info_t *bin_info = &bin_infos[binind];
1539
1540 if (!junked && config_fill && unlikely(opt_junk_free)) {
1541 arena_dalloc_junk_small(ptr, bin_info);
1542 }
1543
1544 arena_slab_reg_dalloc(slab, slab_data, ptr);
1545 unsigned nfree = extent_nfree_get(slab);
1546 if (nfree == bin_info->nregs) {
1547 arena_dissociate_bin_slab(arena, slab, bin);
1548 arena_dalloc_bin_slab(tsdn, arena, slab, bin);
1549 } else if (nfree == 1 && slab != bin->slabcur) {
1550 arena_bin_slabs_full_remove(arena, bin, slab);
1551 arena_bin_lower_slab(tsdn, arena, slab, bin);
1552 }
1553
1554 if (config_stats) {
1555 bin->stats.ndalloc++;
1556 bin->stats.curregs--;
1557 }
1558 }
1559
1560 void
arena_dalloc_bin_junked_locked(tsdn_t * tsdn,arena_t * arena,extent_t * extent,void * ptr)1561 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
1562 void *ptr) {
1563 arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
1564 }
1565
1566 static void
arena_dalloc_bin(tsdn_t * tsdn,arena_t * arena,extent_t * extent,void * ptr)1567 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
1568 szind_t binind = extent_szind_get(extent);
1569 bin_t *bin = &arena->bins[binind];
1570
1571 malloc_mutex_lock(tsdn, &bin->lock);
1572 arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
1573 malloc_mutex_unlock(tsdn, &bin->lock);
1574 }
1575
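/*
 * Free a small allocation directly to its bin: look up the owning extent and
 * arena, return the region, and tick the arena's decay state.
 */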
void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}

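/*
 * Try to resize the allocation at ptr in place.  Returns false on success
 * (the existing allocation can satisfy the request), true if the object must
 * be moved.  Small allocations stay in place only if the size class need not
 * change; large allocations defer to large_ralloc_no_move().
 */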
bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}

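/*
 * Allocate the destination for a moving reallocation: plain arena_malloc()
 * when no alignment is requested, ipalloct() otherwise.
 */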
static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object. In that case, fall back to allocating new space and copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

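/*
 * Returns true (failure) only when dss is unsupported and a precedence other
 * than "disabled" was requested.
 */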
bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

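/*
 * Read and/or update the cap on extent growth for retained memory, under
 * extent_grow_mtx.  A non-NULL old_limit receives the current limit; a
 * non-NULL new_limit installs a new one.  Returns true if the requested limit
 * maps to a page size index beyond EXTENT_GROW_MAX_PIND.
 */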
bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);

	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
		    EXTENT_GROW_MAX_PIND) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}

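/*
 * Create and initialize an arena.  Arena 0 shares the bootstrap base
 * allocator (b0get()); every other arena gets its own base, which is deleted
 * again on any initialization failure.  Returns NULL on failure.
 */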
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability. For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are
	 * in the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bool err = bin_init(&arena->bins[i]);
		if (err) {
			goto label_error;
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (hooks_arena_new_hook) {
			hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

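/*
 * Record the decay defaults from the run-time options, and precompute the
 * per-bin division constants (arena_binind_div_info) for dividing by each
 * bin's region size when mapping a region pointer back to its index within a
 * slab.
 */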
void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size) \
	div_init(&arena_binind_div_info[(index)], (reg_size));
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
    lg_delta_lookup) \
	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
	SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
}

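/*
 * The prefork functions are called in order (0 through 7) before fork() so
 * that all arena mutexes are held across the fork; arena_postfork_parent()
 * and arena_postfork_child() release or reinitialize them afterwards.
 */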
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		bin_prefork(tsdn, &arena->bins[i]);
	}
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

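/*
 * After fork() in the child: reset the thread counts (re-registering only the
 * forking thread), rebuild the stats tcache list with that thread's tcache if
 * it belongs to this arena, and reinitialize every arena mutex.
 */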
void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}
