/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
30 
31 #include "config.h"
32 #include "wtf/PageAllocator.h"
33 
34 #include "wtf/Assertions.h"
35 #include "wtf/ProcessID.h"
36 #include "wtf/SpinLock.h"
37 
38 #include <limits.h>
39 
40 #if OS(POSIX)
41 
42 #include <sys/mman.h>
43 
44 #ifndef MADV_FREE
45 #define MADV_FREE MADV_DONTNEED
46 #endif
47 
48 #ifndef MAP_ANONYMOUS
49 #define MAP_ANONYMOUS MAP_ANON
50 #endif
51 
52 #elif OS(WIN)
53 
54 #include <windows.h>
55 
56 #else
57 #error Unknown OS
58 #endif // OS(POSIX)
59 
60 namespace WTF {
61 
62 // This simple internal function wraps the OS-specific page allocation call so
63 // that it behaves consistently: the address is a hint and if it cannot be used,
64 // the allocation will be placed elsewhere.
systemAllocPages(void * addr,size_t len)65 static void* systemAllocPages(void* addr, size_t len)
66 {
67     ASSERT(!(len & kPageAllocationGranularityOffsetMask));
68     ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
69     void* ret;
70 #if OS(WIN)
71     ret = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
72     if (!ret)
73         ret = VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
74 #else
75     ret = mmap(addr, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
76     if (ret == MAP_FAILED)
77         ret = 0;
78 #endif
79     return ret;
80 }
81 
trimMapping(void * baseAddr,size_t baseLen,void * trimAddr,size_t trimLen)82 static bool trimMapping(void* baseAddr, size_t baseLen, void* trimAddr, size_t trimLen)
83 {
84 #if OS(WIN)
85     return false;
86 #else
87     char* basePtr = static_cast<char*>(baseAddr);
88     char* trimPtr = static_cast<char*>(trimAddr);
89     ASSERT(trimPtr >= basePtr);
90     ASSERT(trimPtr + trimLen <= basePtr + baseLen);
91     size_t preLen = trimPtr - basePtr;
92     if (preLen) {
93         int ret = munmap(basePtr, preLen);
94         RELEASE_ASSERT(!ret);
95     }
96     size_t postLen = (basePtr + baseLen) - (trimPtr + trimLen);
97     if (postLen) {
98         int ret = munmap(trimPtr + trimLen, postLen);
99         RELEASE_ASSERT(!ret);
100     }
101     return true;
102 #endif
103 }
104 
// This is the same PRNG as used by tcmalloc for mapping address randomness;
// see http://burtleburtle.net/bob/rand/smallprng.html
struct ranctx {
    int lock; // Spin lock word guarding all other fields; see ranval().
    bool initialized; // Set on first use, after lazy seeding in ranval().
    // The four 32-bit words of Bob Jenkins' small PRNG state.
    uint32_t a;
    uint32_t b;
    uint32_t c;
    uint32_t d;
};
115 
// 32-bit rotate-left, used only by the PRNG mixing step below.
#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

// Advances the PRNG state in *x by one round and returns the next 32-bit
// value. Not synchronized: callers must hold x->lock (see ranval()).
uint32_t ranvalInternal(ranctx* x)
{
    uint32_t e = x->a - rot(x->b, 27);
    x->a = x->b ^ rot(x->c, 17);
    x->b = x->c + x->d;
    x->c = x->d + e;
    x->d = e + x->a;
    return x->d;
}

#undef rot
129 
ranval(ranctx * x)130 uint32_t ranval(ranctx* x)
131 {
132     spinLockLock(&x->lock);
133     if (UNLIKELY(!x->initialized)) {
134         x->initialized = true;
135         char c;
136         uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
137         seed ^= static_cast<uint32_t>(getCurrentProcessID());
138         x->a = 0xf1ea5eed;
139         x->b = x->c = x->d = seed;
140         for (int i = 0; i < 20; ++i) {
141             (void) ranvalInternal(x);
142         }
143     }
144     uint32_t ret = ranvalInternal(x);
145     spinLockUnlock(&x->lock);
146     return ret;
147 }
148 
// Process-wide PRNG state used to randomize preferred mapping addresses.
static struct ranctx s_ranctx;
150 
151 // This internal function calculates a random preferred mapping address.
152 // It is used when the client of allocPages() passes null as the address.
153 // In calculating an address, we balance good ASLR against not fragmenting the
154 // address space too badly.
getRandomPageBase()155 static void* getRandomPageBase()
156 {
157     uintptr_t random;
158     random = static_cast<uintptr_t>(ranval(&s_ranctx));
159 #if CPU(X86_64)
160     random <<= 32UL;
161     random |= static_cast<uintptr_t>(ranval(&s_ranctx));
162     // This address mask gives a low liklihood of address space collisions.
163     // We handle the situation gracefully if there is a collision.
164 #if OS(WIN)
165     // 64-bit Windows has a bizarrely small 8TB user address space.
166     // Allocates in the 1-5TB region.
167     random &= 0x3ffffffffffUL;
168     random += 0x10000000000UL;
169 #else
170     // Linux and OS X support the full 47-bit user space of x64 processors.
171     random &= 0x3fffffffffffUL;
172 #endif
173 #elif CPU(ARM64)
174     // ARM64 on Linux has 39-bit user space.
175     random &= 0x3fffffffffUL;
176     random += 0x1000000000UL;
177 #else // !CPU(X86_64) && !CPU(ARM64)
178     // This is a good range on Windows, Linux and Mac.
179     // Allocates in the 0.5-1.5GB region.
180     random &= 0x3fffffff;
181     random += 0x20000000;
182 #endif // CPU(X86_64)
183     random &= kPageAllocationGranularityBaseMask;
184     return reinterpret_cast<void*>(random);
185 }
186 
// Reserves and commits len bytes of pages, aligned to align bytes (both must
// be multiples of kPageAllocationGranularity). addr is only a hint; if it is
// null, a randomized, alignment-truncated base address is chosen. Returns the
// mapped address, or 0 if the system is out of address space. If an aligned
// mapping repeatedly cannot be obtained (e.g. due to racing mappers), the
// process is deliberately crashed rather than returning a bad result.
void* allocPages(void* addr, size_t len, size_t align)
{
    ASSERT(len >= kPageAllocationGranularity);
    ASSERT(!(len & kPageAllocationGranularityOffsetMask));
    ASSERT(align >= kPageAllocationGranularity);
    ASSERT(!(align & kPageAllocationGranularityOffsetMask));
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
    size_t alignOffsetMask = align - 1;
    size_t alignBaseMask = ~alignOffsetMask;
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask));
    // If the client passed null as the address, choose a good one.
    if (!addr) {
        addr = getRandomPageBase();
        addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask);
    }

    // The common case, which is also the least work we can do, is that the
    // address and length are suitable. Just try it.
    void* ret = systemAllocPages(addr, len);
    // If the alignment is to our liking, we're done.
    if (!ret || !(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask))
        return ret;

    // Annoying. Unmap and map a larger range to be sure to succeed on the
    // second, slower attempt.
    freePages(ret, len);

    // Oversize by (align - granularity) so the mapping is guaranteed to
    // contain an address with the requested alignment. The RELEASE_ASSERT
    // catches size_t overflow of tryLen.
    size_t tryLen = len + (align - kPageAllocationGranularity);
    RELEASE_ASSERT(tryLen > len);

    // We loop to cater for the unlikely case where another thread maps on top
    // of the aligned location we choose.
    int count = 0;
    while (count++ < 100) {
        ret = systemAllocPages(addr, tryLen);
        if (!ret)
            return 0;
        // We can now try and trim out a subset of the mapping.
        // (Round the returned base up to the requested alignment.)
        addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + alignOffsetMask) & alignBaseMask);

        // On POSIX systems, we can trim the oversized mapping to fit exactly.
        // This will always work on POSIX systems.
        if (trimMapping(ret, tryLen, addr, len))
            return addr;

        // On Windows, you can't trim an existing mapping so we unmap and remap
        // a subset. We used to do for all platforms, but OSX 10.8 has a
        // broken mmap() that ignores address hints for valid, unused addresses.
        freePages(ret, tryLen);
        ret = systemAllocPages(addr, len);
        if (ret == addr || !ret)
            return ret;

        // Unlikely race / collision. Do the simple thing and just start again.
        freePages(ret, len);
        addr = getRandomPageBase();
        addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask);
    }
    IMMEDIATE_CRASH();
    return 0;
}
248 
// Releases a mapping previously created by allocPages(). addr and len must be
// multiples of the page allocation granularity. Crashes on failure, since a
// failed unmap would silently leak address space.
void freePages(void* addr, size_t len)
{
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
    ASSERT(!(len & kPageAllocationGranularityOffsetMask));
#if OS(POSIX)
    int ret = munmap(addr, len);
    RELEASE_ASSERT(!ret);
#else
    // VirtualFree with MEM_RELEASE requires len == 0 and releases the whole
    // reservation made by VirtualAlloc.
    BOOL ret = VirtualFree(addr, 0, MEM_RELEASE);
    RELEASE_ASSERT(ret);
#endif
}
261 
// Makes len bytes of system pages at addr inaccessible: reads and writes will
// fault. On POSIX this uses mprotect(PROT_NONE); on Windows the pages are
// decommitted instead. Crashes on failure.
void setSystemPagesInaccessible(void* addr, size_t len)
{
    ASSERT(!(len & kSystemPageOffsetMask));
#if OS(POSIX)
    int ret = mprotect(addr, len, PROT_NONE);
    RELEASE_ASSERT(!ret);
#else
    BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT);
    RELEASE_ASSERT(ret);
#endif
}
273 
// Reverses setSystemPagesInaccessible(): makes len bytes of system pages at
// addr readable and writable again. On Windows this recommits the pages.
// Crashes on failure.
void setSystemPagesAccessible(void* addr, size_t len)
{
    ASSERT(!(len & kSystemPageOffsetMask));
#if OS(POSIX)
    int ret = mprotect(addr, len, PROT_READ | PROT_WRITE);
    RELEASE_ASSERT(!ret);
#else
    void* ret = VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE);
    RELEASE_ASSERT(ret);
#endif
}
285 
// Tells the OS the contents of these pages are no longer needed, allowing it
// to reclaim the physical memory. On POSIX this uses madvise(MADV_FREE);
// note the header of this file falls back to MADV_DONTNEED where MADV_FREE
// is unavailable. On Windows, decommitting via setSystemPagesInaccessible()
// achieves the same effect.
void decommitSystemPages(void* addr, size_t len)
{
    ASSERT(!(len & kSystemPageOffsetMask));
#if OS(POSIX)
    int ret = madvise(addr, len, MADV_FREE);
    RELEASE_ASSERT(!ret);
#else
    setSystemPagesInaccessible(addr, len);
#endif
}
296 
recommitSystemPages(void * addr,size_t len)297 void recommitSystemPages(void* addr, size_t len)
298 {
299     ASSERT(!(len & kSystemPageOffsetMask));
300 #if OS(POSIX)
301     (void) addr;
302 #else
303     setSystemPagesAccessible(addr, len);
304 #endif
305 }
306 
307 } // namespace WTF
308 
309