#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};
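
/*
 * Illustrative sketch (not part of this file): an application can replace
 * these default hooks for a given arena through the "arena.<i>.chunk_hooks"
 * mallctl.  The my_* hook implementations are hypothetical names.
 *
 *	chunk_hooks_t hooks = {
 *		my_alloc, my_dalloc, my_commit, my_decommit,
 *		my_purge, my_split, my_merge
 *	};
 *	chunk_hooks_t old;
 *	size_t old_sz = sizeof(old);
 *	mallctl("arena.0.chunk_hooks", &old, &old_sz, &hooks, sizeof(hooks));
 */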

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_record(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
    bool zeroed, bool committed);

/******************************************************************************/

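/* Read this arena's chunk hooks; the caller must hold arena->chunks_mtx. */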
static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

	return (chunk_hooks);
}

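/*
 * Install new chunk hooks for this arena and return the previously installed
 * hooks.
 */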
chunk_hooks_t
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers.  There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
#define ATOMIC_COPY_HOOK(n) do {				\
	union {							\
		chunk_##n##_t **n;				\
		void **v;					\
	} u;							\
	u.n = &arena->chunk_hooks.n;				\
	atomic_write_p(u.v, chunk_hooks->n);			\
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

	return (old_chunk_hooks);
}

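/*
 * If *chunk_hooks is still CHUNK_HOOKS_INITIALIZER (i.e. unset), replace it
 * with the hooks currently installed for this arena.
 */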
static void
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(tsdn, arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}

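/*
 * Record the chunk's extent node in the global chunks rtree so that it can
 * later be looked up by address.  When profiling is enabled, also track the
 * total number of chunks and trigger a gdump when a new high water mark is
 * reached.  Returns true on error.
 */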
bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump(tsdn);
	}

	return (false);
}

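/* Remove the chunk's extent node from the chunks rtree and update counters. */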
void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}

/*
 * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
 * best fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, 0, false, false);
	return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
}

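/*
 * Try to satisfy an allocation request from the given extent trees (cached or
 * retained chunks).  On success the selected extent is split so that exactly
 * [ret, ret+size) is returned, and any leading and trailing space is
 * reinserted into the trees.  Returns NULL if no suitable extent is found, or
 * if a split, commit, or node allocation fails along the way.
 */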
static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(CHUNK_CEILING(size) == size);
	assert(alignment > 0);
	assert(new_addr == NULL || alignment == chunksize);
	assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	*sn = extent_node_sn_get(node);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szsnad_remove(chunks_szsnad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szsnad_insert(chunks_szsnad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(tsdn, arena, node);
			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
			chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
			    chunks_ad, cache, ret, size + trailsize, *sn,
			    zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(tsdn, arena);
			if (node == NULL) {
				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
				chunk_record(tsdn, arena, chunk_hooks,
				    chunks_szsnad, chunks_ad, cache, ret, size
				    + trailsize, *sn, zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, *sn, zeroed, committed);
		extent_tree_szsnad_insert(chunks_szsnad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
		    cache, ret, size, *sn, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(tsdn, arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
		if (config_valgrind)
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL)
		return (ret);
	/* mmap. */
	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}

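/*
 * Allocate a chunk of demand-zeroed virtual memory directly via mmap,
 * bypassing the arena chunk hooks and the dss, so that callers are guaranteed
 * untouched memory for internal (base) metadata use.
 */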
void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero, commit;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

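/*
 * Try to satisfy the request from this arena's cache of previously freed
 * (dirty) chunks.  Returns NULL if nothing suitable is cached.
 */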
void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(tsdn, arena, chunk_hooks,
	    &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
	    new_addr, size, alignment, sn, zero, commit, dalloc_node);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static arena_t *
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

static void *
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

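/*
 * Default chunk allocation hook.  Looks up the arena by index and defers to
 * chunk_alloc_core() via chunk_alloc_default_impl().
 */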
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = chunk_arena_get(tsdn, arena_ind);

	return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
	    zero, commit));
}

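/*
 * Try to satisfy the request from this arena's retained chunks, i.e. virtual
 * memory that was deallocated but never returned to the kernel.
 */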
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(tsdn, arena, chunk_hooks,
	    &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
	    new_addr, size, alignment, sn, zero, commit, true);

	if (config_stats && ret != NULL)
		arena->stats.retained -= size;

	return (ret);
}

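/*
 * Allocate a chunk on behalf of an arena: first try to recycle a retained
 * chunk, then fall back to the arena's alloc hook (or directly to
 * chunk_alloc_default_impl() when the default hook is installed, so that tsdn
 * can be propagated).
 */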
void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
	    alignment, sn, zero, commit);
	if (ret == NULL) {
		if (chunk_hooks->alloc == chunk_alloc_default) {
			/* Call directly to propagate tsdn. */
			ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
			    size, alignment, zero, commit);
		} else {
			ret = chunk_hooks->alloc(new_addr, size, alignment,
			    zero, commit, arena->ind);
		}

		if (ret == NULL)
			return (NULL);

		*sn = arena_extent_sn_next(arena);

		if (config_valgrind && chunk_hooks->alloc !=
		    chunk_alloc_default)
			JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	}

	return (ret);
}

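/*
 * Insert the chunk into the given extent trees, coalescing it with the
 * adjacent chunks that follow and precede it when possible.  If node
 * allocation fails, the chunk is purged (if cached) and leaked as virtual
 * memory only.
 */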
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szsnad.
		 */
		extent_tree_szsnad_remove(chunks_szsnad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		if (sn < extent_node_sn_get(node))
			extent_node_sn_set(node, sn);
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szsnad_insert(chunks_szsnad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(tsdn, arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure.  Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(tsdn, arena, chunk_hooks,
				    chunk, size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, sn, !unzeroed,
		    committed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szsnad_insert(chunks_szsnad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szsnad.
		 */
		extent_tree_szsnad_remove(chunks_szsnad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szsnad_remove(chunks_szsnad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		if (extent_node_sn_get(prev) < extent_node_sn_get(node))
			extent_node_sn_set(node, extent_node_sn_get(prev));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szsnad_insert(chunks_szsnad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(tsdn, arena, prev);
	}

label_return:
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}

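/*
 * Return a chunk to this arena's cache of dirty chunks, then give the arena a
 * chance to purge.
 */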
void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, sn, false,
	    committed);
	arena_maybe_purge(tsdn, arena);
}

static bool
chunk_dalloc_default_impl(void *chunk, size_t size)
{

	if (!have_dss || !chunk_in_dss(chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

	return (chunk_dalloc_default_impl(chunk, size));
}

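/*
 * Deallocate a chunk via the arena's dalloc hook.  If the hook refuses to
 * unmap (e.g. for dss memory), decommit or purge the chunk and record it as
 * retained.
 */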
void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
	bool err;

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
	/* Try to deallocate. */
	if (chunk_hooks->dalloc == chunk_dalloc_default) {
		/* Call directly to propagate tsdn. */
		err = chunk_dalloc_default_impl(chunk, size);
	} else
		err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);

	if (!err)
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
		committed = chunk_hooks->decommit(chunk, size, 0, size,
		    arena->ind);
	}
	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
	    committed);

	if (config_stats)
		arena->stats.retained += size;
}

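/*
 * Default commit/decommit/purge hooks; these simply defer to the
 * corresponding pages_*() primitives on the requested sub-range.
 */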
static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

bool
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

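/*
 * Default split/merge hooks.  These only refuse when the platform cannot
 * coalesce mappings, or when a merge would mix dss and non-dss memory.
 */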
static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

static bool
chunk_merge_default_impl(void *chunk_a, void *chunk_b)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
		return (true);

	return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	return (chunk_merge_default_impl(chunk_a, chunk_b));
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
	    sizeof(rtree_node_elm_t)));
}

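/*
 * One-time initialization: derive chunksize, chunksize_mask, and chunk_npages
 * from opt_lg_chunk, boot the dss if available, and create the chunks rtree.
 * Returns true on error.
 */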
bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify actual page size is equal to or an integral multiple of
	 * configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss)
		chunk_dss_boot();
	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}