#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

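/*
 * Slow path for aligned allocation: over-allocate by (alignment - PAGE) so
 * that a suitably aligned region of size bytes is guaranteed to fall within
 * the mapping (mmap() results are already page-aligned), then trim the
 * leading and trailing excess.  pages_trim() can fail on systems where
 * trimming requires unmapping and re-mapping (another thread may claim the
 * region in between), hence the retry loop.
 */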
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	size_t alloc_size;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		void *pages;
		size_t leadsize;
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
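		/*
		 * Worked example (hypothetical addresses): with 2 MiB
		 * alignment (0x200000) and pages == 0x7f0000201000,
		 * ALIGNMENT_CEILING() yields 0x7f0000400000, so leadsize ==
		 * 0x1ff000 bytes of leading excess for pages_trim() to
		 * discard, along with any trailing excess.
		 */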
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	if (!*commit)
		*commit = pages_decommit(ret, size);
	return (ret);
}

void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(new_addr, size);
	if (ret == NULL || ret == new_addr)
		return (ret);
	assert(new_addr == NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
	}

	assert(ret != NULL);
	*zero = true;
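	/*
	 * If the caller did not request committed pages, try to decommit
	 * them.  pages_decommit() returns true when the pages cannot be
	 * decommitted, in which case *commit correctly remains true.
	 */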
	if (!*commit)
		*commit = pages_decommit(ret, size);
	return (ret);
}

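/*
 * Return false if the chunk was unmapped, or true if the mapping was
 * retained because jemalloc was built without munmap support
 * (config_munmap is false); in the latter case the caller is expected to
 * keep the chunk around for later reuse.
 */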
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}
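
/*
 * Example (hypothetical caller, for illustration only): allocate and
 * release one naturally aligned chunk.  If chunk_dalloc_mmap() returned
 * true, the mapping was retained and a real caller would record the chunk
 * for reuse instead of leaking it.
 *
 *	bool zero = false;
 *	bool commit = true;
 *	void *chunk = chunk_alloc_mmap(NULL, chunksize, chunksize, &zero,
 *	    &commit);
 *	if (chunk != NULL)
 *		chunk_dalloc_mmap(chunk, chunksize);
 */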