/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "wtf/PageAllocator.h"

#include "wtf/Assertions.h"
#include "wtf/ProcessID.h"
#include "wtf/SpinLock.h"

#include <limits.h>

#if OS(POSIX)

#include <sys/mman.h>

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#elif OS(WIN)

#include <windows.h>

#else
#error Unknown OS
#endif // OS(POSIX)

namespace WTF {

// This simple internal function wraps the OS-specific page allocation call so
// that it behaves consistently: the address is a hint and if it cannot be used,
// the allocation will be placed elsewhere.
static void* systemAllocPages(void* addr, size_t len)
{
    ASSERT(!(len & kPageAllocationGranularityOffsetMask));
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
    void* ret;
#if OS(WIN)
    ret = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    if (!ret)
        ret = VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
#else
    ret = mmap(addr, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    RELEASE_ASSERT(ret != MAP_FAILED);
#endif
    RELEASE_ASSERT(ret);
    return ret;
}
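// Illustration only (not part of the allocator): the hint semantics above mean
// a caller cannot rely on getting back exactly the requested address, only on
// getting back some valid mapping of the requested length. For example:
//
//   void* hint = getRandomPageBase();
//   void* region = systemAllocPages(hint, kPageAllocationGranularity);
//   // region may or may not equal hint; callers must handle both outcomes.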

static bool trimMapping(void* baseAddr, size_t baseLen, void* trimAddr, size_t trimLen)
{
#if OS(WIN)
    return false;
#else
    char* basePtr = static_cast<char*>(baseAddr);
    char* trimPtr = static_cast<char*>(trimAddr);
    ASSERT(trimPtr >= basePtr);
    ASSERT(trimPtr + trimLen <= basePtr + baseLen);
    size_t preLen = trimPtr - basePtr;
    if (preLen) {
        int ret = munmap(basePtr, preLen);
        RELEASE_ASSERT(!ret);
    }
    size_t postLen = (basePtr + baseLen) - (trimPtr + trimLen);
    if (postLen) {
        int ret = munmap(trimPtr + trimLen, postLen);
        RELEASE_ASSERT(!ret);
    }
    return true;
#endif
}

// This is the same PRNG as used by tcmalloc for mapping address randomness;
// see http://burtleburtle.net/bob/rand/smallprng.html
struct ranctx {
    int lock;
    bool initialized;
    uint32_t a;
    uint32_t b;
    uint32_t c;
    uint32_t d;
};

#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

uint32_t ranvalInternal(ranctx* x)
{
    uint32_t e = x->a - rot(x->b, 27);
    x->a = x->b ^ rot(x->c, 17);
    x->b = x->c + x->d;
    x->c = x->d + e;
    x->d = e + x->a;
    return x->d;
}

#undef rot

uint32_t ranval(ranctx* x)
{
    spinLockLock(&x->lock);
    if (UNLIKELY(!x->initialized)) {
        x->initialized = true;
        char c;
        uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
        seed ^= static_cast<uint32_t>(getCurrentProcessID());
        x->a = 0xf1ea5eed;
        x->b = x->c = x->d = seed;
        for (int i = 0; i < 20; ++i) {
            (void) ranvalInternal(x);
        }
    }
    uint32_t ret = ranvalInternal(x);
    spinLockUnlock(&x->lock);
    return ret;
}

static struct ranctx s_ranctx;
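// Note: s_ranctx is zero-initialized here and seeded lazily on first use
// inside ranval(), from the address of a stack variable XORed with the current
// process ID.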

// This internal function calculates a random preferred mapping address.
// It is used when the client of allocPages() passes null as the address.
// In calculating an address, we balance good ASLR against not fragmenting the
// address space too badly.
static void* getRandomPageBase()
{
    uintptr_t random;
    random = static_cast<uintptr_t>(ranval(&s_ranctx));
#if CPU(X86_64)
    random <<= 32UL;
    random |= static_cast<uintptr_t>(ranval(&s_ranctx));
    // This address mask gives a low likelihood of address space collisions.
    // We handle the situation gracefully if there is a collision.
#if OS(WIN)
    // 64-bit Windows has a bizarrely small 8TB user address space.
    // Allocate in the 1-5TB region.
    random &= 0x3ffffffffffUL;
    random += 0x10000000000UL;
#else
    // Linux and OS X support the full 47-bit user space of x64 processors.
    random &= 0x3fffffffffffUL;
#endif
#else // !CPU(X86_64)
    // This is a good range on Windows, Linux and Mac.
    // Allocate in the 0.5-1.5GB region.
    random &= 0x3fffffff;
    random += 0x20000000;
#endif // CPU(X86_64)
    random &= kPageAllocationGranularityBaseMask;
    return reinterpret_cast<void*>(random);
}
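// Worked example of the !CPU(X86_64) branch above: masking with 0x3fffffff
// keeps the low 30 bits (an offset in [0, 1GB)), adding 0x20000000 shifts the
// range to [0.5GB, 1.5GB), and the final granularity mask rounds the result
// down to a legal allocation boundary.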

void* allocPages(void* addr, size_t len, size_t align)
{
    RELEASE_ASSERT(len < INT_MAX - align);
    ASSERT(len >= kPageAllocationGranularity);
    ASSERT(!(len & kPageAllocationGranularityOffsetMask));
    ASSERT(align >= kPageAllocationGranularity);
    ASSERT(!(align & kPageAllocationGranularityOffsetMask));
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
    size_t alignOffsetMask = align - 1;
    size_t alignBaseMask = ~alignOffsetMask;
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask));
    // If the client passed null as the address, choose a good one.
    if (!addr) {
        addr = getRandomPageBase();
        addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask);
    }

    // The common case, which is also the least work we can do, is that the
    // address and length are suitable. Just try it.
    void* ret = systemAllocPages(addr, len);
    // If the alignment is to our liking, we're done.
    if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask))
        return ret;

    // Annoying. Unmap and map a larger range to be sure to succeed on the
    // second, slower attempt.
    freePages(ret, len);

    size_t tryLen = len + (align - kPageAllocationGranularity);

    // We loop to cater for the unlikely case where another thread maps on top
    // of the aligned location we choose.
    int count = 0;
    while (count++ < 100) {
        ret = systemAllocPages(addr, tryLen);
        // We can now try and trim out a subset of the mapping.
        addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + alignOffsetMask) & alignBaseMask);

        // On POSIX systems, we can trim the oversized mapping to fit exactly.
        // This will always work on POSIX systems.
        if (trimMapping(ret, tryLen, addr, len))
            return addr;

        // On Windows, we can't trim an existing mapping, so we unmap and remap
        // a subset. We used to do this for all platforms, but OS X 10.8 has a
        // broken mmap() that ignores address hints for valid, unused addresses.
        freePages(ret, tryLen);
        ret = systemAllocPages(addr, len);
        if (ret == addr)
            return ret;

        // Unlikely race / collision. Do the simple thing and just start again.
        freePages(ret, len);
        addr = getRandomPageBase();
        addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask);
    }
    IMMEDIATE_CRASH();
    return 0;
}
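// Illustration only (not part of the allocator): a typical aligned request.
// The size below is just an example; any multiple of kPageAllocationGranularity
// that also satisfies the alignment constraints works.
//
//   size_t size = 1024 * 1024; // 1MB, requested to be 1MB-aligned.
//   void* block = allocPages(0, size, size);
//   // block is non-null (failure crashes) and
//   // !(reinterpret_cast<uintptr_t>(block) & (size - 1)) holds.
//   freePages(block, size);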

// Frees a mapping previously returned by allocPages().
void freePages(void* addr, size_t len)
{
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
    ASSERT(!(len & kPageAllocationGranularityOffsetMask));
#if OS(POSIX)
    int ret = munmap(addr, len);
    RELEASE_ASSERT(!ret);
#else
    BOOL ret = VirtualFree(addr, 0, MEM_RELEASE);
    RELEASE_ASSERT(ret);
#endif
}

// Makes the given system pages inaccessible: PROT_NONE on POSIX, decommitted
// on Windows.
void setSystemPagesInaccessible(void* addr, size_t len)
{
    ASSERT(!(len & kSystemPageOffsetMask));
#if OS(POSIX)
    int ret = mprotect(addr, len, PROT_NONE);
    RELEASE_ASSERT(!ret);
#else
    BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT);
    RELEASE_ASSERT(ret);
#endif
}

// Tells the OS that the contents of these pages are no longer needed (madvise
// on POSIX, MEM_RESET on Windows); the address range itself remains usable.
void decommitSystemPages(void* addr, size_t len)
{
    ASSERT(!(len & kSystemPageOffsetMask));
#if OS(POSIX)
    int ret = madvise(addr, len, MADV_FREE);
    RELEASE_ASSERT(!ret);
#else
    void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE);
    RELEASE_ASSERT(ret);
#endif
}

} // namespace WTF