// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/base/allocator/partition_allocator/page_allocator.h"

#include <limits.h>

#include <atomic>

#include "third_party/base/allocator/partition_allocator/address_space_randomization.h"
#include "third_party/base/base_export.h"
#include "third_party/base/logging.h"
#include "third_party/build/build_config.h"

#if defined(OS_POSIX)

#include <errno.h>
#include <sys/mman.h>

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

// On POSIX |mmap| uses a nearby address if the hint address is blocked.
static const bool kHintIsAdvisory = true;
static std::atomic<int32_t> s_allocPageErrorCode{0};

#elif defined(OS_WIN)

#include <windows.h>

// |VirtualAlloc| will fail if allocation at the hint address is blocked.
static const bool kHintIsAdvisory = false;
static std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};

#else
#error Unknown OS
#endif  // defined(OS_POSIX)

namespace pdfium {
namespace base {

// This internal function wraps the OS-specific page allocation call:
// |VirtualAlloc| on Windows, and |mmap| on POSIX.
static void* SystemAllocPages(
    void* hint,
    size_t length,
    PageAccessibilityConfiguration page_accessibility) {
  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
  DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
           kPageAllocationGranularityOffsetMask));
  void* ret;
#if defined(OS_WIN)
  DWORD access_flag =
      page_accessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
  ret = VirtualAlloc(hint, length, MEM_RESERVE | MEM_COMMIT, access_flag);
  if (!ret)
    s_allocPageErrorCode = GetLastError();
#else
  int access_flag = page_accessibility == PageAccessible
                        ? (PROT_READ | PROT_WRITE)
                        : PROT_NONE;
  ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (ret == MAP_FAILED) {
    s_allocPageErrorCode = errno;
    ret = 0;
  }
#endif
  return ret;
}

// Trims |base| to the given length and alignment. On Windows, returns null on
// failure and frees |base|.
static void* TrimMapping(void* base,
                         size_t base_length,
                         size_t trim_length,
                         uintptr_t align,
                         PageAccessibilityConfiguration page_accessibility) {
  size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (align - 1);
  if (pre_slack)
    pre_slack = align - pre_slack;
  size_t post_slack = base_length - pre_slack - trim_length;
  DCHECK(base_length >= trim_length || pre_slack || post_slack);
  DCHECK(pre_slack < base_length);
  DCHECK(post_slack < base_length);
  void* ret = base;

#if defined(OS_POSIX)  // On POSIX we can resize the allocation run.
  (void)page_accessibility;
  if (pre_slack) {
    int res = munmap(base, pre_slack);
    CHECK(!res);
    ret = reinterpret_cast<char*>(base) + pre_slack;
  }
  if (post_slack) {
    int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
    CHECK(!res);
  }
#else  // On Windows we can't resize the allocation run.
  if (pre_slack || post_slack) {
    ret = reinterpret_cast<char*>(base) + pre_slack;
    FreePages(base, base_length);
    ret = SystemAllocPages(ret, trim_length, page_accessibility);
  }
#endif

  return ret;
}

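// Illustrative example of the trimming arithmetic above (values are made up):
// with base = 0x5000, base_length = 0x7000, trim_length = 0x4000 and
// align = 0x4000, pre_slack becomes 0x3000 and post_slack becomes 0, so the
// returned sub-run starts at the aligned address 0x8000.
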
void* AllocPages(void* address,
                 size_t length,
                 size_t align,
                 PageAccessibilityConfiguration page_accessibility) {
  DCHECK(length >= kPageAllocationGranularity);
  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
  DCHECK(align >= kPageAllocationGranularity);
  DCHECK(!(align & kPageAllocationGranularityOffsetMask));
  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
           kPageAllocationGranularityOffsetMask));
  uintptr_t align_offset_mask = align - 1;
  uintptr_t align_base_mask = ~align_offset_mask;
  DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));

  // If the client passed null as the address, choose a good one.
  if (!address) {
    address = GetRandomPageBase();
    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
                                      align_base_mask);
  }

  // First try to force an exact-size, aligned allocation from our random base.
  for (int count = 0; count < 3; ++count) {
    void* ret = SystemAllocPages(address, length, page_accessibility);
    if (kHintIsAdvisory || ret) {
      // If the alignment is to our liking, we're done.
      if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
        return ret;
      FreePages(ret, length);
#if defined(ARCH_CPU_32_BITS)
      address = reinterpret_cast<void*>(
          (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask);
#endif
    } else if (!address) {  // We know we're OOM when an unhinted allocation
                            // fails.
      return nullptr;
    } else {
#if defined(ARCH_CPU_32_BITS)
      address = reinterpret_cast<char*>(address) + align;
#endif
    }

#if !defined(ARCH_CPU_32_BITS)
    // Keep trying random addresses on systems that have a large address space.
    address = GetRandomPageBase();
    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
                                      align_base_mask);
#endif
  }

  // Map a larger allocation so we can force alignment, but continue
  // randomizing only on 64-bit POSIX.
  size_t try_length = length + (align - kPageAllocationGranularity);
  CHECK(try_length >= length);
  void* ret;

  do {
    // Don't continue to burn cycles on mandatory hints (Windows).
    address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
    ret = SystemAllocPages(address, try_length, page_accessibility);
    // The retries are for Windows, where a race can steal our mapping on
    // resize.
  } while (ret &&
           (ret = TrimMapping(ret, try_length, length, align,
                              page_accessibility)) == nullptr);

  return ret;
}

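// Usage sketch (illustrative only, not exercised in this file): reserve a run
// of 2 MiB aligned on a 2 MiB boundary with no address hint, then release it.
// 2 MiB is assumed to be a multiple of kPageAllocationGranularity here.
//
//   const size_t kSize = 2 * 1024 * 1024;
//   void* p = AllocPages(nullptr, kSize, kSize, PageAccessible);
//   if (p)
//     FreePages(p, kSize);
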
void FreePages(void* address, size_t length) {
  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
           kPageAllocationGranularityOffsetMask));
  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
#if defined(OS_POSIX)
  int ret = munmap(address, length);
  CHECK(!ret);
#else
  BOOL ret = VirtualFree(address, 0, MEM_RELEASE);
  CHECK(ret);
#endif
}

void SetSystemPagesInaccessible(void* address, size_t length) {
  DCHECK(!(length & kSystemPageOffsetMask));
#if defined(OS_POSIX)
  int ret = mprotect(address, length, PROT_NONE);
  CHECK(!ret);
#else
  BOOL ret = VirtualFree(address, length, MEM_DECOMMIT);
  CHECK(ret);
#endif
}

bool SetSystemPagesAccessible(void* address, size_t length) {
  DCHECK(!(length & kSystemPageOffsetMask));
#if defined(OS_POSIX)
  return !mprotect(address, length, PROT_READ | PROT_WRITE);
#else
  return !!VirtualAlloc(address, length, MEM_COMMIT, PAGE_READWRITE);
#endif
}

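// Usage sketch (illustrative only): the two helpers above are what callers
// use to install and remove guard pages. Assuming |p| points at a
// system-page-aligned, accessible region obtained from AllocPages(), and
// using kSystemPageSize from page_allocator.h:
//
//   SetSystemPagesInaccessible(p, kSystemPageSize);       // Install guard.
//   CHECK(SetSystemPagesAccessible(p, kSystemPageSize));  // Remove guard.
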
void DecommitSystemPages(void* address, size_t length) {
  DCHECK(!(length & kSystemPageOffsetMask));
#if defined(OS_POSIX)
  int ret = madvise(address, length, MADV_FREE);
  if (ret != 0 && errno == EINVAL) {
    // MADV_FREE only works on Linux 4.5+. If the request failed, retry with
    // the older MADV_DONTNEED. Note that MADV_FREE being defined at compile
    // time doesn't imply runtime support.
    ret = madvise(address, length, MADV_DONTNEED);
  }
  CHECK(!ret);
#else
  SetSystemPagesInaccessible(address, length);
#endif
}

void RecommitSystemPages(void* address, size_t length) {
  DCHECK(!(length & kSystemPageOffsetMask));
#if defined(OS_POSIX)
  // Nothing to do: pages decommitted by DecommitSystemPages() stay mapped and
  // are faulted back in on first access.
  (void)address;
#else
  CHECK(SetSystemPagesAccessible(address, length));
#endif
}

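// Usage sketch (illustrative only): release the physical memory behind |p|
// while keeping the address range reserved, then reuse the range. Assumes
// |p| is a system-page-aligned, accessible region from AllocPages().
//
//   DecommitSystemPages(p, kSystemPageSize);
//   RecommitSystemPages(p, kSystemPageSize);  // No-op on POSIX.
//   memset(p, 0, kSystemPageSize);            // Pages fault back in on touch.
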
void DiscardSystemPages(void* address, size_t length) {
  DCHECK(!(length & kSystemPageOffsetMask));
#if defined(OS_POSIX)
  // On POSIX, the implementation detail is that discard and decommit are the
  // same, and lead to pages that are returned to the system immediately and
  // get replaced with zeroed pages when touched. So we just call
  // DecommitSystemPages() here to avoid code duplication.
  DecommitSystemPages(address, length);
#else
  // On Windows discarded pages are not returned to the system immediately and
  // not guaranteed to be zeroed when returned to the application.
  using DiscardVirtualMemoryFunction =
      DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
  static DiscardVirtualMemoryFunction discard_virtual_memory =
      reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
  if (discard_virtual_memory ==
      reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
    discard_virtual_memory =
        reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
            GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
  // Use DiscardVirtualMemory when available because it releases faster than
  // MEM_RESET.
  DWORD ret = 1;
  if (discard_virtual_memory)
    ret = discard_virtual_memory(address, length);
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret) {
    void* ret = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    CHECK(ret);
  }
#endif
}

uint32_t GetAllocPageErrorCode() {
  return s_allocPageErrorCode;
}

}  // namespace base
}  // namespace pdfium