#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#endif

/******************************************************************************/
/* Data. */

#ifndef _WIN32
#  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
#  define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;

/******************************************************************************/
/* Defines/includes needed for special android code. */

#if defined(__ANDROID__)
#include <sys/prctl.h>
#endif

/******************************************************************************/

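/*
 * Map size bytes of pages, using addr (if non-NULL) as a placement hint.
 * When the OS overcommits, *commit is forced to true because the pages are
 * treated as always committed.  Returns NULL on failure, including the case
 * where a specific addr was requested but a different address came back.
 *
 * Illustrative caller sketch (the names below are hypothetical, not part of
 * this file):
 *
 *	bool commit = false;
 *	void *p = pages_map(NULL, map_size, &commit);
 *	if (p == NULL)
 *		... handle allocation failure ...
 */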
void *
pages_map(void *addr, size_t size, bool *commit)
{
	void *ret;

	assert(size != 0);

	if (os_overcommits)
		*commit = true;

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	{
		int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;

		ret = mmap(addr, size, prot, mmap_flags, -1, 0);
	}
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		pages_unmap(ret, size);
		ret = NULL;
	}
#endif
#if defined(__ANDROID__)
	if (ret != NULL) {
		/* Name this memory as being used by libc */
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret,
		    size, "libc_malloc");
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

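/*
 * Unmap a region previously obtained from pages_map()/pages_trim().  On
 * failure, a warning is printed and the process aborts if opt_abort is set.
 */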
void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
#else
		    "munmap"
#endif
		    "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

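/*
 * Given a mapping of alloc_size bytes at addr, keep only the size bytes that
 * begin leadsize bytes into it.  On Windows the whole region is released and
 * re-mapped at the target address, so the call can fail and return NULL;
 * elsewhere the leading and trailing excess is simply unmapped.
 */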
void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
    bool *commit)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size, commit);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

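/*
 * Common implementation of pages_commit()/pages_decommit().  Returns false on
 * success, true on failure.  When the OS overcommits, the call is a no-op
 * that reports failure.  On non-Windows systems, (de)commit is emulated by
 * mmap()ing over the region with MAP_FIXED and the desired protection.
 */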
static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{

	if (os_overcommits)
		return (true);

#ifdef _WIN32
	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
	    PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
	{
		int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
		void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
		    -1, 0);
		if (result == MAP_FAILED)
			return (true);
		if (result != addr) {
			/*
			 * We succeeded in mapping memory, but not in the right
			 * place.
			 */
			pages_unmap(result, size);
			return (true);
		}
		return (false);
	}
#endif
}

bool
pages_commit(void *addr, size_t size)
{

	return (pages_commit_impl(addr, size, true));
}

bool
pages_decommit(void *addr, size_t size)
{

	return (pages_commit_impl(addr, size, false));
}

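/*
 * Tell the kernel that the pages are unused and may be discarded (MEM_RESET
 * on Windows, MADV_FREE or MADV_DONTNEED elsewhere).  Returns true if the
 * pages may still contain non-zero data afterward.
 */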
bool
pages_purge(void *addr, size_t size)
{
	bool unzeroed;

#ifdef _WIN32
	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
	unzeroed = true;
#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
#  if defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#    define JEMALLOC_MADV_ZEROS false
#  elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#    define JEMALLOC_MADV_ZEROS true
#  else
#    error No madvise(2) flag defined for purging unused dirty pages
#  endif
	int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
#  undef JEMALLOC_MADV_PURGE
#  undef JEMALLOC_MADV_ZEROS
#else
	/* Last resort no-op. */
	unzeroed = true;
#endif
	return (unzeroed);
}

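/*
 * Hint that the region may be backed by transparent huge pages
 * (MADV_HUGEPAGE).  Returns true on error; a no-op when JEMALLOC_THP is not
 * configured.
 */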
bool
pages_huge(void *addr, size_t size)
{

	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

#ifdef JEMALLOC_THP
	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
	return (false);
#endif
}

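/*
 * Hint that the region should not be backed by transparent huge pages
 * (MADV_NOHUGEPAGE).  Returns true on error; a no-op when JEMALLOC_THP is not
 * configured.
 */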
bool
pages_nohuge(void *addr, size_t size)
{

	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

#ifdef JEMALLOC_THP
	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
	return (false);
#endif
}

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
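/*
 * Determine whether the OS overcommits by reading the vm.overcommit sysctl;
 * overcommit is assumed when neither of the two low-order bits is set.
 */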
static bool
os_overcommits_sysctl(void)
{
	int vm_overcommit;
	size_t sz;

	sz = sizeof(vm_overcommit);
	if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
		return (false); /* Error. */

	return ((vm_overcommit & 0x3) == 0);
}
#endif

#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
 * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
 * reentry during bootstrapping if another library has interposed system call
 * wrappers.
 */
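/* Determine whether the OS overcommits via /proc/sys/vm/overcommit_memory. */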
static bool
os_overcommits_proc(void)
{
	int fd;
	char buf[1];
	ssize_t nread;

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
	fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
#else
	fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
#endif
	if (fd == -1)
		return (false); /* Error. */

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
	nread = read(fd, &buf, sizeof(buf));
#endif

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
	syscall(SYS_close, fd);
#else
	close(fd);
#endif

	if (nread < 1)
		return (false); /* Error. */
	/*
	 * /proc/sys/vm/overcommit_memory meanings:
	 * 0: Heuristic overcommit.
	 * 1: Always overcommit.
	 * 2: Never overcommit.
	 */
	return (buf[0] == '0' || buf[0] == '1');
}
#endif

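/*
 * One-time initialization: set the base mmap() flags and detect whether the
 * OS overcommits; when it does and MAP_NORESERVE is available, swap
 * reservation is skipped for subsequent mappings.
 */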
void
pages_boot(void)
{

#ifndef _WIN32
	mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
	os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
	os_overcommits = os_overcommits_proc();
#  ifdef MAP_NORESERVE
	if (os_overcommits)
		mmap_flags |= MAP_NORESERVE;
#  endif
#else
	os_overcommits = false;
#endif
}