#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t	curchunks;
static size_t	highchunks;

rtree_t		chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;

static void	*chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool	chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool	chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool	chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t	chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (chunk_hooks);
}

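/*
 * Swap in a new set of chunk hooks for the arena and return the previous
 * hooks.  Each function pointer is copied atomically (see ATOMIC_COPY_HOOK
 * below) so that lock-free readers never observe a torn pointer.  This is
 * the backing implementation for user-supplied chunk hooks, typically
 * installed through the "arena.<i>.chunk_hooks" mallctl.
 */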
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers.  There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
#define	ATOMIC_COPY_HOOK(n) do {					\
	union {								\
		chunk_##n##_t	**n;					\
		void		**v;					\
	} u;								\
	u.n = &arena->chunk_hooks.n;					\
	atomic_write_p(u.v, chunk_hooks->n);				\
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (old_chunk_hooks);
}

static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

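/*
 * Record the (chunk, node) mapping in the global chunks_rtree so that the
 * extent node owning a chunk can later be looked up by address.  When heap
 * profiling is enabled, also maintain the curchunks/highchunks counters and
 * trigger a gdump if a new high-water mark is reached.  Returns true on
 * failure.
 */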
bool
chunk_register(const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump();
	}

	return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, false, false);
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}

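/*
 * Try to satisfy an allocation from one pair of extent trees (the cached or
 * retained chunks).  On success the selected extent is split so that any
 * leading/trailing space goes back into the trees, the memory is committed
 * if necessary, and (if *zero was requested) zeroed.  Returns NULL if no
 * suitable extent is found or if a split/commit hook fails.
 */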
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(arena, node);
			malloc_mutex_unlock(&arena->chunks_mtx);
			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
			    cache, ret, size + trailsize, zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(arena);
			if (node == NULL) {
				malloc_mutex_unlock(&arena->chunks_mtx);
				chunk_record(arena, chunk_hooks, chunks_szad,
				    chunks_ad, cache, ret, size + trailsize,
				    zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, zeroed, committed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
		    ret, size, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(&arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* mmap. */
	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero, commit;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

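/*
 * Allocate a chunk from the arena's cache of previously freed chunks.  The
 * request is satisfied from chunks_szad_cached/chunks_ad_cached via
 * chunk_recycle(); memory returned from the cache is expected to be
 * committed.  Returns NULL if the cache cannot satisfy the request.
 */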
void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;
	bool commit;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	commit = true;
	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
	    &commit, dalloc_node);
	if (ret == NULL)
		return (NULL);
	assert(commit);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret;
	arena_t *arena;

	arena = chunk_arena_get(arena_ind);
	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit,
	    arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

static void *
chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
	    commit, true));
}

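/*
 * Allocate a chunk on behalf of an arena, first trying to recycle retained
 * (previously deallocated but still reserved) address space, and only falling
 * back to the arena's alloc hook when nothing suitable is retained.
 */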
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);

	ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
		    commit, arena->ind);
		if (ret == NULL)
			return (NULL);
	}

	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}

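/*
 * Return a chunk to the given pair of extent trees (cached or retained),
 * coalescing it with adjacent extents when they have the same committed
 * state and the merge hook permits it.  If no extent node can be allocated
 * for the chunk, the chunk is purged (when cached) and leaked as virtual
 * memory only.
 */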
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure.  Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(arena, chunk_hooks, chunk,
				    size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, !unzeroed,
		    committed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(arena, prev);
	}

label_return:
	malloc_mutex_unlock(&arena->chunks_mtx);
}

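/*
 * Stash a chunk in the arena's cache so it can be reused by
 * chunk_alloc_cache(), then give the arena a chance to purge dirty memory.
 */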
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
	arena_maybe_purge(arena);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

	if (!have_dss || !chunk_in_dss(chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}

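/*
 * Permanently discard a chunk.  If the dalloc hook declines (returns true),
 * the memory is decommitted or purged as a fallback and the address range is
 * recorded in the retained trees for later reuse by chunk_alloc_retained().
 */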
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	/* Try to deallocate. */
	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
		committed = chunk_hooks->decommit(chunk, size, 0, size,
		    arena->ind);
	}
	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

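/*
 * Default split hook.  Splitting is purely logical for the default hooks, so
 * it is allowed whenever the platform can later coalesce the resulting
 * mappings again; otherwise refuse (return true) so chunks are never divided
 * irreversibly.
 */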
static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

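/*
 * Default merge hook.  Refuse to merge (return true) when the platform
 * cannot coalesce mappings, or when the two chunks were obtained from
 * different sources (one from the dss and the other from mmap), since such
 * ranges cannot safely be treated as one.
 */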
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
		return (true);

	return (false);
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(nelms *
	    sizeof(rtree_node_elm_t)));
}

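/*
 * One-time initialization of chunk parameters.  Derives chunksize,
 * chunksize_mask, and chunk_npages from opt_lg_chunk, and creates
 * chunks_rtree keyed on the (1 << (LG_SIZEOF_PTR+3)) - opt_lg_chunk
 * significant bits of a chunk's base address.  Returns true on failure.
 */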
bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify actual page size is equal to or an integral multiple of
	 * configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}

void
chunk_prefork(void)
{

	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
}