#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

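/* Look up the extent node that tracks the huge allocation at ptr. */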
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

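/* Register the ptr -> node mapping; returns true on error. */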
static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

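/* Remove the ptr -> node mapping created by huge_node_set(). */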
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

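/*
 * Allocate a huge region of at least size usable bytes with chunksize
 * alignment.  Returns NULL on size_t overflow or allocation failure.
 */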
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache)
{
	size_t usize;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (NULL);
	}

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

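/*
 * Allocate usize bytes with the given alignment as one or more contiguous
 * chunks, tracked by a separately allocated extent node that is linked into
 * the owning arena's huge list.
 */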
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	/* ANDROID change */
#if !defined(__LP64__)
	/* On 32 bit systems, using a per arena cache can exhaust
	 * virtual address space. Force all huge allocations to
	 * always take place in the first arena.
	 */
	arena = a0get();
#else
	arena = arena_choose(tsd, arena);
#endif
	/* End ANDROID change */
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    usize, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, usize);
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, usize);

	return (ret);
}

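/*
 * Junk-fill a huge region prior to deallocation; a no-op unless junk filling
 * is enabled and the memory will not simply be unmapped.
 */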
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

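/*
 * Resize in place when the existing chunk footprint already accommodates the
 * new usable size; only purge and fill bookkeeping is required.
 */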
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
    size_t size, size_t extra, bool zero)
{
	size_t usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_purge_t *chunk_purge;
	bool zeroed;

	/* Increase usize to incorporate extra. */
	while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
		usize = usize_next;

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);

	malloc_mutex_lock(&arena->lock);
	chunk_purge = arena->chunk_purge;
	malloc_mutex_unlock(&arena->lock);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		zeroed = !chunk_purge_wrapper(arena, chunk_purge, ptr, usize,
		    sdiff);
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			zeroed = false;
		}
	} else
		zeroed = true;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Clear node's zeroed field if zeroing failed above. */
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}

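/*
 * Shrink in place: purge or junk the trailing space and return the excess
 * chunks to the arena.
 */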
static void
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_purge_t *chunk_purge;
	bool zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);

	malloc_mutex_lock(&arena->lock);
	chunk_purge = arena->chunk_purge;
	malloc_mutex_unlock(&arena->lock);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		zeroed = !chunk_purge_wrapper(arena, chunk_purge,
		    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
		    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			zeroed = false;
		}
	} else
		zeroed = true;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Clear node's zeroed field if zeroing failed above. */
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
}

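/*
 * Attempt to expand in place by growing the allocation with additional
 * chunks.  Returns true on failure.
 */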
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
	size_t usize;
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (true);
	}

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

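/*
 * Try to resize a huge allocation in place.  Returns true if the allocation
 * must instead be moved (reallocated and copied).
 */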
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t usize;

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize)
		return (true);

	assert(s2u(oldsize) == oldsize);
	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (true);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
		    zero);
		return (false);
	}

	/* Shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
		huge_ralloc_no_move_shrink(ptr, oldsize, usize);
		return (false);
	}

	/* Attempt to expand the allocation in-place. */
	if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
		if (extra == 0)
			return (true);

		/* Try again, this time without extra. */
		return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
	}
	return (false);
}

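/*
 * Resize a huge allocation, first attempting to do so in place and otherwise
 * falling back to allocating new space, copying, and freeing the old region.
 */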
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize) {
		ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
		    tcache);
	} else
		ret = huge_malloc(tsd, arena, size + extra, zero, tcache);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize) {
			ret = huge_palloc(tsd, arena, size, alignment, zero,
			    tcache);
		} else
			ret = huge_malloc(tsd, arena, size, zero, tcache);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}

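/*
 * Deallocate a huge region: deregister it, unlink it from the arena's huge
 * list, junk-fill it, and release its chunks and extent node.
 */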
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true);
}

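/* Return the arena that owns the huge allocation at ptr. */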
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

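/* Return the usable size of the huge allocation at ptr. */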
size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

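/* Return the profiling context associated with the huge allocation. */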
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

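/* Associate a profiling context with the huge allocation. */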
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}