1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/allocator/partition_allocator/page_allocator.h"
6
7 #include <atomic>
8 #include <cstdint>
9
10 #include "base/allocator/partition_allocator/address_space_randomization.h"
11 #include "base/allocator/partition_allocator/page_allocator_internal.h"
12 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
13 #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
14 #include "base/allocator/partition_allocator/partition_alloc_check.h"
15 #include "base/allocator/partition_allocator/partition_lock.h"
16 #include "build/build_config.h"
17
18 #if BUILDFLAG(IS_WIN)
19 #include <windows.h>
20 #endif
21
22 #if BUILDFLAG(IS_WIN)
23 #include "base/allocator/partition_allocator/page_allocator_internals_win.h"
24 #elif BUILDFLAG(IS_POSIX)
25 #include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
26 #elif BUILDFLAG(IS_FUCHSIA)
27 #include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
28 #else
29 #error Platform not supported.
30 #endif
31
32 namespace partition_alloc {
33
34 namespace {
35
// Guards the single address-space reservation below.
internal::Lock g_reserve_lock;

// We may reserve/release address space on different threads.
internal::Lock& GetReserveLock() {
  return g_reserve_lock;
}

// Running total of address space mapped via SystemAllocPages() and not yet
// returned via FreePages(). Updated with relaxed atomics; used for stats only.
std::atomic<size_t> g_total_mapped_address_space;

// We only support a single block of reserved address space.
uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;
48
AllocPagesIncludingReserved(uintptr_t address,size_t length,PageAccessibilityConfiguration accessibility,PageTag page_tag,int file_descriptor_for_shared_alloc=-1)49 uintptr_t AllocPagesIncludingReserved(
50 uintptr_t address,
51 size_t length,
52 PageAccessibilityConfiguration accessibility,
53 PageTag page_tag,
54 int file_descriptor_for_shared_alloc = -1) {
55 uintptr_t ret =
56 internal::SystemAllocPages(address, length, accessibility, page_tag,
57 file_descriptor_for_shared_alloc);
58 if (!ret) {
59 const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
60 if (cant_alloc_length) {
61 // The system cannot allocate |length| bytes. Release any reserved address
62 // space and try once more.
63 ReleaseReservation();
64 ret = internal::SystemAllocPages(address, length, accessibility, page_tag,
65 file_descriptor_for_shared_alloc);
66 }
67 }
68 return ret;
69 }
70
71 // Trims memory at |base_address| to given |trim_length| and |alignment|.
72 //
73 // On failure, on Windows, this function returns 0 and frees memory at
74 // |base_address|.
TrimMapping(uintptr_t base_address,size_t base_length,size_t trim_length,uintptr_t alignment,uintptr_t alignment_offset,PageAccessibilityConfiguration accessibility)75 uintptr_t TrimMapping(uintptr_t base_address,
76 size_t base_length,
77 size_t trim_length,
78 uintptr_t alignment,
79 uintptr_t alignment_offset,
80 PageAccessibilityConfiguration accessibility) {
81 PA_DCHECK(base_length >= trim_length);
82 PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
83 PA_DCHECK(alignment_offset < alignment);
84 uintptr_t new_base =
85 NextAlignedWithOffset(base_address, alignment, alignment_offset);
86 PA_DCHECK(new_base >= base_address);
87 size_t pre_slack = new_base - base_address;
88 size_t post_slack = base_length - pre_slack - trim_length;
89 PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
90 PA_DCHECK(pre_slack < base_length);
91 PA_DCHECK(post_slack < base_length);
92 return internal::TrimMappingInternal(base_address, base_length, trim_length,
93 accessibility, pre_slack, post_slack);
94 }
95
96 } // namespace
97
98 // Align |address| up to the closest, non-smaller address, that gives
99 // |requested_offset| remainder modulo |alignment|.
100 //
101 // Examples for alignment=1024 and requested_offset=64:
102 // 64 -> 64
103 // 65 -> 1088
104 // 1024 -> 1088
105 // 1088 -> 1088
106 // 1089 -> 2112
107 // 2048 -> 2112
NextAlignedWithOffset(uintptr_t address,uintptr_t alignment,uintptr_t requested_offset)108 uintptr_t NextAlignedWithOffset(uintptr_t address,
109 uintptr_t alignment,
110 uintptr_t requested_offset) {
111 PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
112 PA_DCHECK(requested_offset < alignment);
113
114 uintptr_t actual_offset = address & (alignment - 1);
115 uintptr_t new_address;
116 if (actual_offset <= requested_offset) {
117 new_address = address + requested_offset - actual_offset;
118 } else {
119 new_address = address + alignment + requested_offset - actual_offset;
120 }
121 PA_DCHECK(new_address >= address);
122 PA_DCHECK(new_address - address < alignment);
123 PA_DCHECK(new_address % alignment == requested_offset);
124
125 return new_address;
126 }
127
128 namespace internal {
129
SystemAllocPages(uintptr_t hint,size_t length,PageAccessibilityConfiguration accessibility,PageTag page_tag,int file_descriptor_for_shared_alloc)130 uintptr_t SystemAllocPages(uintptr_t hint,
131 size_t length,
132 PageAccessibilityConfiguration accessibility,
133 PageTag page_tag,
134 int file_descriptor_for_shared_alloc) {
135 PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
136 PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
137 uintptr_t ret = internal::SystemAllocPagesInternal(
138 hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
139 if (ret) {
140 g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
141 }
142
143 return ret;
144 }
145
146 } // namespace internal
147
AllocPages(size_t length,size_t align,PageAccessibilityConfiguration accessibility,PageTag page_tag,int file_descriptor_for_shared_alloc)148 uintptr_t AllocPages(size_t length,
149 size_t align,
150 PageAccessibilityConfiguration accessibility,
151 PageTag page_tag,
152 int file_descriptor_for_shared_alloc) {
153 return AllocPagesWithAlignOffset(0, length, align, 0, accessibility, page_tag,
154 file_descriptor_for_shared_alloc);
155 }
AllocPages(uintptr_t address,size_t length,size_t align,PageAccessibilityConfiguration accessibility,PageTag page_tag)156 uintptr_t AllocPages(uintptr_t address,
157 size_t length,
158 size_t align,
159 PageAccessibilityConfiguration accessibility,
160 PageTag page_tag) {
161 return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
162 page_tag);
163 }
AllocPages(void * address,size_t length,size_t align,PageAccessibilityConfiguration accessibility,PageTag page_tag)164 void* AllocPages(void* address,
165 size_t length,
166 size_t align,
167 PageAccessibilityConfiguration accessibility,
168 PageTag page_tag) {
169 return reinterpret_cast<void*>(
170 AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
171 accessibility, page_tag));
172 }
173
AllocPagesWithAlignOffset(uintptr_t address,size_t length,size_t align,size_t align_offset,PageAccessibilityConfiguration accessibility,PageTag page_tag,int file_descriptor_for_shared_alloc)174 uintptr_t AllocPagesWithAlignOffset(
175 uintptr_t address,
176 size_t length,
177 size_t align,
178 size_t align_offset,
179 PageAccessibilityConfiguration accessibility,
180 PageTag page_tag,
181 int file_descriptor_for_shared_alloc) {
182 PA_DCHECK(length >= internal::PageAllocationGranularity());
183 PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
184 PA_DCHECK(align >= internal::PageAllocationGranularity());
185 // Alignment must be power of 2 for masking math to work.
186 PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
187 PA_DCHECK(align_offset < align);
188 PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
189 PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
190 uintptr_t align_offset_mask = align - 1;
191 uintptr_t align_base_mask = ~align_offset_mask;
192 PA_DCHECK(!address || (address & align_offset_mask) == align_offset);
193
194 // If the client passed null as the address, choose a good one.
195 if (!address) {
196 address = (GetRandomPageBase() & align_base_mask) + align_offset;
197 }
198
199 // First try to force an exact-size, aligned allocation from our random base.
200 #if defined(ARCH_CPU_32_BITS)
201 // On 32 bit systems, first try one random aligned address, and then try an
202 // aligned address derived from the value of |ret|.
203 constexpr int kExactSizeTries = 2;
204 #else
205 // On 64 bit systems, try 3 random aligned addresses.
206 constexpr int kExactSizeTries = 3;
207 #endif
208
209 for (int i = 0; i < kExactSizeTries; ++i) {
210 uintptr_t ret =
211 AllocPagesIncludingReserved(address, length, accessibility, page_tag,
212 file_descriptor_for_shared_alloc);
213 if (ret) {
214 // If the alignment is to our liking, we're done.
215 if ((ret & align_offset_mask) == align_offset) {
216 return ret;
217 }
218 // Free the memory and try again.
219 FreePages(ret, length);
220 } else {
221 // |ret| is null; if this try was unhinted, we're OOM.
222 if (internal::kHintIsAdvisory || !address) {
223 return 0;
224 }
225 }
226
227 #if defined(ARCH_CPU_32_BITS)
228 // For small address spaces, try the first aligned address >= |ret|. Note
229 // |ret| may be null, in which case |address| becomes null. If
230 // |align_offset| is non-zero, this calculation may get us not the first,
231 // but the next matching address.
232 address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
233 #else // defined(ARCH_CPU_64_BITS)
234 // Keep trying random addresses on systems that have a large address space.
235 address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
236 #endif
237 }
238
239 // Make a larger allocation so we can force alignment.
240 size_t try_length = length + (align - internal::PageAllocationGranularity());
241 PA_CHECK(try_length >= length);
242 uintptr_t ret;
243
244 do {
245 // Continue randomizing only on POSIX.
246 address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
247 ret =
248 AllocPagesIncludingReserved(address, try_length, accessibility,
249 page_tag, file_descriptor_for_shared_alloc);
250 // The retries are for Windows, where a race can steal our mapping on
251 // resize.
252 } while (ret && (ret = TrimMapping(ret, try_length, length, align,
253 align_offset, accessibility)) == 0);
254
255 return ret;
256 }
257
FreePages(uintptr_t address,size_t length)258 void FreePages(uintptr_t address, size_t length) {
259 PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
260 PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
261 internal::FreePagesInternal(address, length);
262 PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
263 g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
264 }
FreePages(void * address,size_t length)265 void FreePages(void* address, size_t length) {
266 FreePages(reinterpret_cast<uintptr_t>(address), length);
267 }
268
TrySetSystemPagesAccess(uintptr_t address,size_t length,PageAccessibilityConfiguration accessibility)269 bool TrySetSystemPagesAccess(uintptr_t address,
270 size_t length,
271 PageAccessibilityConfiguration accessibility) {
272 PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
273 return internal::TrySetSystemPagesAccessInternal(address, length,
274 accessibility);
275 }
TrySetSystemPagesAccess(void * address,size_t length,PageAccessibilityConfiguration accessibility)276 bool TrySetSystemPagesAccess(void* address,
277 size_t length,
278 PageAccessibilityConfiguration accessibility) {
279 return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
280 accessibility);
281 }
282
SetSystemPagesAccess(uintptr_t address,size_t length,PageAccessibilityConfiguration accessibility)283 void SetSystemPagesAccess(uintptr_t address,
284 size_t length,
285 PageAccessibilityConfiguration accessibility) {
286 PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
287 internal::SetSystemPagesAccessInternal(address, length, accessibility);
288 }
289
DecommitSystemPages(uintptr_t address,size_t length,PageAccessibilityDisposition accessibility_disposition)290 void DecommitSystemPages(
291 uintptr_t address,
292 size_t length,
293 PageAccessibilityDisposition accessibility_disposition) {
294 PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
295 PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
296 internal::DecommitSystemPagesInternal(address, length,
297 accessibility_disposition);
298 }
DecommitSystemPages(void * address,size_t length,PageAccessibilityDisposition accessibility_disposition)299 void DecommitSystemPages(
300 void* address,
301 size_t length,
302 PageAccessibilityDisposition accessibility_disposition) {
303 DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
304 accessibility_disposition);
305 }
306
DecommitAndZeroSystemPages(uintptr_t address,size_t length)307 void DecommitAndZeroSystemPages(uintptr_t address, size_t length) {
308 PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
309 PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
310 internal::DecommitAndZeroSystemPagesInternal(address, length);
311 }
DecommitAndZeroSystemPages(void * address,size_t length)312 void DecommitAndZeroSystemPages(void* address, size_t length) {
313 DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length);
314 }
315
RecommitSystemPages(uintptr_t address,size_t length,PageAccessibilityConfiguration accessibility,PageAccessibilityDisposition accessibility_disposition)316 void RecommitSystemPages(
317 uintptr_t address,
318 size_t length,
319 PageAccessibilityConfiguration accessibility,
320 PageAccessibilityDisposition accessibility_disposition) {
321 PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
322 PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
323 PA_DCHECK(accessibility.permissions !=
324 PageAccessibilityConfiguration::kInaccessible);
325 internal::RecommitSystemPagesInternal(address, length, accessibility,
326 accessibility_disposition);
327 }
328
TryRecommitSystemPages(uintptr_t address,size_t length,PageAccessibilityConfiguration accessibility,PageAccessibilityDisposition accessibility_disposition)329 bool TryRecommitSystemPages(
330 uintptr_t address,
331 size_t length,
332 PageAccessibilityConfiguration accessibility,
333 PageAccessibilityDisposition accessibility_disposition) {
334 // Duplicated because we want errors to be reported at a lower level in the
335 // crashing case.
336 PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
337 PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
338 PA_DCHECK(accessibility.permissions !=
339 PageAccessibilityConfiguration::kInaccessible);
340 return internal::TryRecommitSystemPagesInternal(
341 address, length, accessibility, accessibility_disposition);
342 }
343
DiscardSystemPages(uintptr_t address,size_t length)344 void DiscardSystemPages(uintptr_t address, size_t length) {
345 PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
346 internal::DiscardSystemPagesInternal(address, length);
347 }
DiscardSystemPages(void * address,size_t length)348 void DiscardSystemPages(void* address, size_t length) {
349 DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
350 }
351
ReserveAddressSpace(size_t size)352 bool ReserveAddressSpace(size_t size) {
353 // To avoid deadlock, call only SystemAllocPages.
354 internal::ScopedGuard guard(GetReserveLock());
355 if (!s_reservation_address) {
356 uintptr_t mem = internal::SystemAllocPages(
357 0, size,
358 PageAccessibilityConfiguration(
359 PageAccessibilityConfiguration::kInaccessible),
360 PageTag::kChromium);
361 if (mem) {
362 // We guarantee this alignment when reserving address space.
363 PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
364 s_reservation_address = mem;
365 s_reservation_size = size;
366 return true;
367 }
368 }
369 return false;
370 }
371
ReleaseReservation()372 bool ReleaseReservation() {
373 // To avoid deadlock, call only FreePages.
374 internal::ScopedGuard guard(GetReserveLock());
375 if (!s_reservation_address) {
376 return false;
377 }
378
379 FreePages(s_reservation_address, s_reservation_size);
380 s_reservation_address = 0;
381 s_reservation_size = 0;
382 return true;
383 }
384
HasReservationForTesting()385 bool HasReservationForTesting() {
386 internal::ScopedGuard guard(GetReserveLock());
387 return s_reservation_address;
388 }
389
GetAllocPageErrorCode()390 uint32_t GetAllocPageErrorCode() {
391 return internal::s_allocPageErrorCode;
392 }
393
GetTotalMappedSize()394 size_t GetTotalMappedSize() {
395 return g_total_mapped_address_space;
396 }
397
#if BUILDFLAG(IS_WIN)
namespace {
// Windows-only flag read via GetRetryOnCommitFailure(); presumably consulted
// by the Windows page-allocator internals when a commit fails — TODO confirm
// against page_allocator_internals_win.h. No synchronization: plain bool.
bool g_retry_on_commit_failure = false;
}

// Enables/disables the retry-on-commit-failure behavior (Windows only).
void SetRetryOnCommitFailure(bool retry_on_commit_failure) {
  g_retry_on_commit_failure = retry_on_commit_failure;
}

// Returns the current retry-on-commit-failure setting (Windows only).
bool GetRetryOnCommitFailure() {
  return g_retry_on_commit_failure;
}
#endif
411
412 } // namespace partition_alloc
413