#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

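/* Look up the extent node that tracks the huge allocation at ptr. */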
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

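/* Record the mapping from ptr to its extent node. */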
static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

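/* Remove the mapping from ptr to its extent node. */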
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

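/* Allocate a huge region of usize bytes, aligned to chunksize. */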
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
    tcache_t *tcache)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

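/*
 * Allocate a huge region of usize bytes with the specified alignment, backed
 * by one or more contiguous chunks.
 */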
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t ausize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	/* ANDROID change */
#if !defined(__LP64__)
	/*
	 * On 32-bit systems, using a per arena cache can exhaust virtual
	 * address space.  Force all huge allocations to always take place in
	 * the first arena.
	 */
	extern arena_t *a0get(void);
	arena = a0get();
#else
	arena = arena_choose(tsd, arena);
#endif
	/* End ANDROID change */
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    usize, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, usize);
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, usize);

	arena_decay_tick(tsd, arena);
	return (ret);
}

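/*
 * Under JEMALLOC_JET, huge_dalloc_junk is exposed through a function pointer
 * so that tests can interpose on junk filling.
 */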
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

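/*
 * Resize in place without changing the set of chunks backing the allocation;
 * only size bookkeeping and junk/zero fill are updated.
 */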
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}

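/*
 * Shrink in place by splitting off the trailing chunks that are no longer
 * needed and returning them to the arena.
 */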
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}

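/*
 * Grow in place by extending the allocation with additional chunks mapped
 * directly after the existing ones.  Returns true on failure.
 */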
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

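/*
 * Try to resize in place.  Returns false if the request was satisfied without
 * moving the allocation, true if a move is required.
 */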
bool
huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		arena_decay_tick(tsd, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

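/* Dispatch to huge_malloc() or huge_palloc() depending on alignment. */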
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}

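/*
 * Reallocate a huge region: resize in place when possible, otherwise allocate
 * new space, copy, and free the old region.
 */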
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}

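/* Deallocate a huge region and the extent node that tracks it. */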
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);

	arena_decay_tick(tsd, arena);
}

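/* Return the arena that owns the huge allocation at ptr. */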
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

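/* Return the current usable size of the huge allocation at ptr. */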
size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

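/* Read the profiling context associated with the huge allocation at ptr. */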
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

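/* Associate a profiling context with the huge allocation at ptr. */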
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}

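/* Reset ptr's profiling context to the "not sampled" sentinel. */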
void
huge_prof_tctx_reset(const void *ptr)
{

	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}