// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/partition_address_space.h"

#include <array>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"

#if BUILDFLAG(IS_IOS)
#include <mach-o/dyld.h>
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif  // BUILDFLAG(IS_WIN)

#if PA_CONFIG(ENABLE_SHADOW_METADATA) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
#include <sys/mman.h>
#endif

namespace partition_alloc::internal {

#if BUILDFLAG(HAS_64_BIT_POINTERS)

namespace {

#if BUILDFLAG(IS_WIN)

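// These handlers are deliberately PA_NOINLINE and use PA_NO_CODE_FOLDING() so
// that the linker cannot merge their otherwise identical bodies; each failure
// mode keeps a distinct crash stack.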
PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
  PA_NO_CODE_FOLDING();
  PA_CHECK(false);
}

PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
  PA_NO_CODE_FOLDING();
  PA_CHECK(false);
}
#endif  // BUILDFLAG(IS_WIN)

PA_NOINLINE void HandlePoolAllocFailure() {
  PA_NO_CODE_FOLDING();
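  // Record the platform allocation error code on the stack so that it is
  // visible in crash dumps.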
  uint32_t alloc_page_error_code = GetAllocPageErrorCode();
  PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
  // It's important to easily differentiate these two failures on Windows, so
  // crash with different stacks.
#if BUILDFLAG(IS_WIN)
  if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
    // The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
    // it must be VA space exhaustion.
    HandlePoolAllocFailureOutOfVASpace();
  } else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
    // Should not happen, since, as of Windows 8.1+, reserving address space
    // should not be charged against the commit limit, aside from a very small
    // amount per 64 kiB block. Keep this path anyway, to check in crash
    // reports.
    HandlePoolAllocFailureOutOfCommitCharge();
  } else
#endif  // BUILDFLAG(IS_WIN)
  {
    PA_CHECK(false);
  }
}

}  // namespace

PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
#endif

#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#if !BUILDFLAG(IS_IOS)
#error Dynamic pool size is only supported on iOS.
#endif

namespace {
bool IsIOSTestProcess() {
  // On iOS, only applications with the extended virtual addressing entitlement
  // can use a large address space. Since Earl Grey test runner apps cannot get
  // entitlements, they must use a much smaller pool size. Similarly,
  // integration tests for ChromeWebView end up with two PartitionRoots since
  // both the integration tests and ChromeWebView have a copy of base/. Even
  // with the entitlement, there is insufficient address space for two
  // PartitionRoots, so a smaller pool size is needed.

  // Use a fixed buffer size to avoid allocation inside the allocator.
  constexpr size_t path_buffer_size = 8192;
  char executable_path[path_buffer_size];

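  // _NSGetExecutablePath() returns a non-zero value if the buffer is too
  // small; the fixed 8 KiB buffer above is assumed to always be large enough,
  // which the PA_CHECK below enforces.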
  uint32_t executable_length = path_buffer_size;
  int rv = _NSGetExecutablePath(executable_path, &executable_length);
  PA_CHECK(!rv);
  size_t executable_path_length =
      std::char_traits<char>::length(executable_path);

  auto has_suffix = [&](const char* suffix) -> bool {
    size_t suffix_length = std::char_traits<char>::length(suffix);
    if (executable_path_length < suffix_length) {
      return false;
    }
    return std::char_traits<char>::compare(
               executable_path + (executable_path_length - suffix_length),
               suffix, suffix_length) == 0;
  };

  return has_suffix("Runner") || has_suffix("ios_web_view_inttests");
}
}  // namespace

PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
  return IsIOSTestProcess() ? kRegularPoolSizeForIOSTestProcess
                            : kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
  return IsIOSTestProcess() ? kBRPPoolSizeForIOSTestProcess : kBRPPoolSize;
}
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

void PartitionAddressSpace::Init() {
  if (IsInitialized()) {
    return;
  }

  size_t regular_pool_size = RegularPoolSize();
  size_t brp_pool_size = BRPPoolSize();

#if BUILDFLAG(GLUE_CORE_POOLS)
  // Gluing core pools (regular & BRP) makes sense only when both pools are of
  // the same size. This is the only way we can check belonging to either of
  // the two with a single bitmask operation.
  PA_CHECK(regular_pool_size == brp_pool_size);

  // TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
  int pools_fd = -1;

  size_t glued_pool_sizes = regular_pool_size * 2;
  // Note, the BRP pool must be preceded by a "forbidden zone", which is
  // conveniently taken care of by the last guard page of the regular pool.
  setup_.regular_pool_base_address_ =
      AllocPages(glued_pool_sizes, glued_pool_sizes,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc, pools_fd);
  if (!setup_.regular_pool_base_address_) {
    HandlePoolAllocFailure();
  }
  setup_.brp_pool_base_address_ =
      setup_.regular_pool_base_address_ + regular_pool_size;
#else  // BUILDFLAG(GLUE_CORE_POOLS)
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
#else
  int regular_pool_fd = -1;
#endif
  setup_.regular_pool_base_address_ =
      AllocPages(regular_pool_size, regular_pool_size,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc, regular_pool_fd);
  if (!setup_.regular_pool_base_address_) {
    HandlePoolAllocFailure();
  }

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
#else
  int brp_pool_fd = -1;
#endif
  // Reserve an extra allocation granularity unit before the BRP pool, but keep
  // the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
  // is a valid pointer, and having a "forbidden zone" before the BRP pool
  // prevents such a pointer from "sneaking into" the pool.
  const size_t kForbiddenZoneSize = PageAllocationGranularity();
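  // With an align offset of (brp_pool_size - kForbiddenZoneSize), the
  // reservation starts kForbiddenZoneSize bytes before a brp_pool_size
  // boundary, so the pool itself (past the forbidden zone) lands exactly on
  // that boundary; the alignment DCHECK further below relies on this.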
  uintptr_t base_address = AllocPagesWithAlignOffset(
      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
      brp_pool_size - kForbiddenZoneSize,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kInaccessible),
      PageTag::kPartitionAlloc, brp_pool_fd);
  if (!base_address) {
    HandlePoolAllocFailure();
  }
  setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#endif  // BUILDFLAG(GLUE_CORE_POOLS)

#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
  setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
#if BUILDFLAG(GLUE_CORE_POOLS)
  // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
  // regular pool, effectively forming one virtual pool twice the size. Adjust
  // the mask appropriately.
  setup_.core_pools_base_mask_ = setup_.regular_pool_base_mask_ << 1;
  PA_DCHECK(setup_.core_pools_base_mask_ == (setup_.brp_pool_base_mask_ << 1));
#endif
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

  AddressPoolManager::GetInstance().Add(
      kRegularPoolHandle, setup_.regular_pool_base_address_,
      regular_pool_size);
  AddressPoolManager::GetInstance().Add(
      kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);

  // Sanity check pool alignment.
  PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
  PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
#if BUILDFLAG(GLUE_CORE_POOLS)
  PA_DCHECK(!(setup_.regular_pool_base_address_ & (glued_pool_sizes - 1)));
#endif

  // Sanity check pool belonging.
  PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
                            regular_pool_size - 1));
  PA_DCHECK(
      !IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));
#if BUILDFLAG(GLUE_CORE_POOLS)
  PA_DCHECK(!IsInCorePools(setup_.regular_pool_base_address_ - 1));
  PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_));
  PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_ +
                          regular_pool_size - 1));
  PA_DCHECK(
      IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size));
  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ - 1));
  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_));
  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size - 1));
  PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
#endif  // BUILDFLAG(GLUE_CORE_POOLS)

#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  // Reserve memory for PCScan quarantine card table.
  uintptr_t requested_address = setup_.regular_pool_base_address_;
  uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
      kRegularPoolHandle, requested_address, kSuperPageSize);
  PA_CHECK(requested_address == actual_address)
      << "QuarantineCardTable is required to be allocated at the beginning of "
         "the regular pool";
#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  // Reserve memory for the shadow pools.
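  // Since the shadow mappings are created from the same memfds as the real
  // pools, they are second views of the same pages; the signed offsets
  // recorded below translate an address in a real pool into the corresponding
  // address in its shadow.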
  uintptr_t regular_pool_shadow_address =
      AllocPages(regular_pool_size, regular_pool_size,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc, regular_pool_fd);
  regular_pool_shadow_offset_ =
      regular_pool_shadow_address - setup_.regular_pool_base_address_;

  uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
      brp_pool_size - kForbiddenZoneSize,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kInaccessible),
      PageTag::kPartitionAlloc, brp_pool_fd);
  brp_pool_shadow_offset_ =
      brp_pool_shadow_address - setup_.brp_pool_base_address_;
#endif

#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
  CompressedPointerBaseGlobal::SetBase(setup_.regular_pool_base_address_);
#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
}

void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
                                                 size_t size) {
  // The ConfigurablePool must only be initialized once.
  PA_CHECK(!IsConfigurablePoolInitialized());

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // It's possible that the thread isolated pool has been initialized first, in
  // which case the setup_ memory has been made read-only. Remove the
  // protection temporarily.
  if (IsThreadIsolatedPoolInitialized()) {
    UnprotectThreadIsolatedGlobals();
  }
#endif

  PA_CHECK(pool_base);
  PA_CHECK(size <= kConfigurablePoolMaxSize);
  PA_CHECK(size >= kConfigurablePoolMinSize);
  PA_CHECK(std::has_single_bit(size));
  PA_CHECK(pool_base % size == 0);
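  // The pool size must be a power of two and the base aligned on that size, so
  // that membership in the pool can be tested with the single base mask
  // computed below.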

  setup_.configurable_pool_base_address_ = pool_base;
  setup_.configurable_pool_base_mask_ = ~(size - 1);

  AddressPoolManager::GetInstance().Add(
      kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // Put the metadata protection back in place.
  if (IsThreadIsolatedPoolInitialized()) {
    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
  }
#endif
}

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
void PartitionAddressSpace::InitThreadIsolatedPool(
    ThreadIsolationOption thread_isolation) {
  // The ThreadIsolated pool can't be initialized with conflicting settings.
  if (IsThreadIsolatedPoolInitialized()) {
    PA_CHECK(setup_.thread_isolation_ == thread_isolation);
    return;
  }

  size_t pool_size = ThreadIsolatedPoolSize();
  setup_.thread_isolated_pool_base_address_ =
      AllocPages(pool_size, pool_size,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc);
  if (!setup_.thread_isolated_pool_base_address_) {
    HandlePoolAllocFailure();
  }

  PA_DCHECK(!(setup_.thread_isolated_pool_base_address_ & (pool_size - 1)));
  setup_.thread_isolation_ = thread_isolation;
  AddressPoolManager::GetInstance().Add(
      kThreadIsolatedPoolHandle, setup_.thread_isolated_pool_base_address_,
      pool_size);

  PA_DCHECK(
      !IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ - 1));
  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_));
  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
                                   pool_size - 1));
  PA_DCHECK(!IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
                                    pool_size));

  // TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
}
#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)

void PartitionAddressSpace::UninitForTesting() {
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  UninitThreadIsolatedPoolForTesting();  // IN-TEST
#endif
#if BUILDFLAG(GLUE_CORE_POOLS)
  // The core pools (regular & BRP) were allocated using a single allocation of
  // double size.
  FreePages(setup_.regular_pool_base_address_, 2 * RegularPoolSize());
#else  // BUILDFLAG(GLUE_CORE_POOLS)
  FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
  // For BRP pool, the allocation region includes a "forbidden zone" before the
  // pool.
  const size_t kForbiddenZoneSize = PageAllocationGranularity();
  FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
            BRPPoolSize() + kForbiddenZoneSize);
#endif  // BUILDFLAG(GLUE_CORE_POOLS)
  // Do not free pages for the configurable pool, because its memory is owned
  // by someone else, but deinitialize it nonetheless.
  setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_mask_ = 0;
  AddressPoolManager::GetInstance().ResetForTesting();
#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
  CompressedPointerBaseGlobal::ResetBaseForTesting();
#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
}

void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // It's possible that the thread isolated pool has been initialized first, in
  // which case the setup_ memory has been made read-only. Remove the
  // protection temporarily.
  if (IsThreadIsolatedPoolInitialized()) {
    UnprotectThreadIsolatedGlobals();
  }
#endif
  AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_mask_ = 0;
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // Put the metadata protection back in place.
  if (IsThreadIsolatedPoolInitialized()) {
    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
  }
#endif
}

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
  if (IsThreadIsolatedPoolInitialized()) {
    UnprotectThreadIsolatedGlobals();
#if BUILDFLAG(PA_DCHECK_IS_ON)
    ThreadIsolationSettings::settings.enabled = false;
#endif

    FreePages(setup_.thread_isolated_pool_base_address_,
              ThreadIsolatedPoolSize());
    AddressPoolManager::GetInstance().Remove(kThreadIsolatedPoolHandle);
    setup_.thread_isolated_pool_base_address_ = kUninitializedPoolBaseAddress;
    setup_.thread_isolation_.enabled = false;
  }
}
#endif

#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)

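// On Linux arm64, the hardware page size is not a compile-time constant
// (e.g. 4 KiB and 16 KiB kernels are both in use), so the page size and shift
// are resolved at runtime and cached in this global.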
PageCharacteristics page_characteristics;

#endif  // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)

#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

}  // namespace partition_alloc::internal