• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2020 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
7 
8 #include <algorithm>
9 #include <array>
10 #include <cstddef>
11 #include <limits>
12 
13 #include "base/allocator/partition_allocator/address_pool_manager_types.h"
14 #include "base/allocator/partition_allocator/page_allocator_constants.h"
15 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
16 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
17 #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
18 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
19 #include "base/allocator/partition_allocator/partition_alloc_check.h"
20 #include "base/allocator/partition_allocator/partition_alloc_config.h"
21 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
22 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
23 #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
24 #include "base/allocator/partition_allocator/pkey.h"
25 #include "base/allocator/partition_allocator/tagging.h"
26 #include "build/build_config.h"
27 
28 // The feature is not applicable to 32-bit address space.
29 #if BUILDFLAG(HAS_64_BIT_POINTERS)
30 
31 namespace partition_alloc {
32 
33 namespace internal {
34 
// Manages PartitionAlloc address space, which is split into pools.
// See `glossary.md`.
//
// This is a static-only class: all state lives in the `setup_` global (plus
// the shadow offsets when shadow metadata is enabled), and every member is
// static. Pool membership checks are pure mask-and-compare operations so they
// stay cheap enough for hot paths.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
 public:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  // Pool size is chosen at runtime, so the mask is read from `setup_`.
  PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() {
    return setup_.regular_pool_base_mask_;
  }
#else
  // Pool size is fixed at compile time, so the mask is a constant.
  PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() {
    return kRegularPoolBaseMask;
  }
#endif

  // Maps `address` to the pool containing it and the offset of the address
  // from that pool's base. `address` must belong to one of the initialized
  // pools; otherwise this hits PA_NOTREACHED().
  PA_ALWAYS_INLINE static std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
      uintptr_t address) {
    // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
    PA_DCHECK(!IsInBRPPool(address));
#endif
    pool_handle pool = kNullPoolHandle;
    uintptr_t base = 0;
    if (IsInRegularPool(address)) {
      pool = kRegularPoolHandle;
      base = setup_.regular_pool_base_address_;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
    } else if (IsInBRPPool(address)) {
      pool = kBRPPoolHandle;
      base = setup_.brp_pool_base_address_;
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
    } else if (IsInConfigurablePool(address)) {
      PA_DCHECK(IsConfigurablePoolInitialized());
      pool = kConfigurablePoolHandle;
      base = setup_.configurable_pool_base_address_;
#if BUILDFLAG(ENABLE_PKEYS)
    } else if (IsInPkeyPool(address)) {
      pool = kPkeyPoolHandle;
      base = setup_.pkey_pool_base_address_;
#endif
    } else {
      PA_NOTREACHED();
    }
    return std::make_pair(pool, address - base);
  }
  // Bounds for the size accepted by InitConfigurablePool().
  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMaxSize() {
    return kConfigurablePoolMaxSize;
  }
  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMinSize() {
    return kConfigurablePoolMinSize;
  }

  // Initialize pools (except for the configurable one).
  //
  // This function must only be called from the main thread.
  static void Init();
  // Initialize the ConfigurablePool at the given address |pool_base|. It must
  // be aligned to the size of the pool. The size must be a power of two and
  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
  //
  // This function must only be called from the main thread.
  static void InitConfigurablePool(uintptr_t pool_base, size_t size);
#if BUILDFLAG(ENABLE_PKEYS)
  // Initialize the pkey pool, tagging its metadata with |pkey|.
  static void InitPkeyPool(int pkey);
  static void UninitPkeyPoolForTesting();
#endif
  static void UninitForTesting();
  static void UninitConfigurablePoolForTesting();

  // Returns true iff the regular and BRP pools have been set up.
  PA_ALWAYS_INLINE static bool IsInitialized() {
    // Either neither or both regular and BRP pool are initialized. The
    // configurable and pkey pool are initialized separately.
    if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
      PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
      return true;
    }

    PA_DCHECK(setup_.brp_pool_base_address_ == kUninitializedPoolBaseAddress);
    return false;
  }

  PA_ALWAYS_INLINE static bool IsConfigurablePoolInitialized() {
    return setup_.configurable_pool_base_address_ !=
           kUninitializedPoolBaseAddress;
  }

#if BUILDFLAG(ENABLE_PKEYS)
  PA_ALWAYS_INLINE static bool IsPkeyPoolInitialized() {
    return setup_.pkey_pool_base_address_ != kUninitializedPoolBaseAddress;
  }
#endif

  // Returns false for nullptr.
  // Before Init(), the base address is -1 (all ones), so the mask-and-compare
  // below can never match and this correctly returns false.
  PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
    constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
#endif
    return (address & regular_pool_base_mask) ==
           setup_.regular_pool_base_address_;
  }

  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
    return setup_.regular_pool_base_address_;
  }

  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
    constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
#endif
    return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
  }

#if PA_CONFIG(GLUE_CORE_POOLS)
  // Checks whether the address belongs to either regular or BRP pool.
  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
#else
    // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
    // regular pool, effectively forming one virtual pool of a twice bigger
    // size. Adjust the mask appropriately.
    constexpr uintptr_t core_pools_base_mask = kRegularPoolBaseMask << 1;
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    bool ret =
        (address & core_pools_base_mask) == setup_.regular_pool_base_address_;
    // Sanity check: the glued mask must agree with the per-pool checks.
    PA_DCHECK(ret == (IsInRegularPool(address) || IsInBRPPool(address)));
    return ret;
  }
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  PA_ALWAYS_INLINE static size_t CorePoolsSize() {
    return RegularPoolSize() * 2;
  }
#else
  PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() {
    return RegularPoolSize() * 2;
  }
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#endif  // PA_CONFIG(GLUE_CORE_POOLS)

  // Returns the offset of `address` within the BRP pool. `address` must be in
  // the BRP pool (DCHECKed).
  PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) {
    PA_DCHECK(IsInBRPPool(address));
    return address - setup_.brp_pool_base_address_;
  }

  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInConfigurablePool(uintptr_t address) {
    return (address & setup_.configurable_pool_base_mask_) ==
           setup_.configurable_pool_base_address_;
  }

  PA_ALWAYS_INLINE static uintptr_t ConfigurablePoolBase() {
    return setup_.configurable_pool_base_address_;
  }

#if BUILDFLAG(ENABLE_PKEYS)
  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInPkeyPool(uintptr_t address) {
    return (address & kPkeyPoolBaseMask) == setup_.pkey_pool_base_address_;
  }
#endif

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  // Returns the offset from a pool's metadata to its shadow copy. Only the
  // regular and BRP pools have shadows.
  PA_ALWAYS_INLINE static std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
    if (pool == kRegularPoolHandle) {
      return regular_pool_shadow_offset_;
    } else if (pool == kBRPPoolHandle) {
      return brp_pool_shadow_offset_;
    } else {
      // TODO(crbug.com/1362969): Add shadow for configurable pool as well.
      // Shadow is not created for ConfigurablePool for now, so this part should
      // be unreachable.
      PA_NOTREACHED();
      return 0;
    }
  }
#endif

  // PartitionAddressSpace is static_only class.
  PartitionAddressSpace() = delete;
  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
  void* operator new(size_t) = delete;
  void* operator new(size_t, void*) = delete;

 private:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  PA_ALWAYS_INLINE static size_t RegularPoolSize();
  PA_ALWAYS_INLINE static size_t BRPPoolSize();
#else
  // The pool sizes should be as large as maximum whenever possible.
  PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() {
    return kRegularPoolSize;
  }
  PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() {
    return kBRPPoolSize;
  }
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

#if BUILDFLAG(ENABLE_PKEYS)
  PA_ALWAYS_INLINE static constexpr size_t PkeyPoolSize() {
    return kPkeyPoolSize;
  }
#endif

  // On 64-bit systems, PA allocates from several contiguous, mutually disjoint
  // pools. The BRP pool is where all allocations have a BRP ref-count, thus
  // pointers pointing there can use a BRP protection against UaF. Allocations
  // in the other pools don't have that.
  //
  // Pool sizes have to be the power of two. Each pool will be aligned at its
  // own size boundary.
  //
  // NOTE! The BRP pool must be preceded by an inaccessible region. This is to
  // prevent a pointer to the end of a non-BRP-pool allocation from falling into
  // the BRP pool, thus triggering BRP mechanism and likely crashing. This
  // "forbidden zone" can be as small as 1B, but it's simpler to just reserve an
  // allocation granularity unit.
  //
  // The ConfigurablePool is an optional Pool that can be created inside an
  // existing mapping provided by the embedder. This Pool can be used when
  // certain PA allocations must be located inside a given virtual address
  // region. One use case for this Pool is V8 Sandbox, which requires that
  // ArrayBuffers be located inside of it.
  static constexpr size_t kRegularPoolSize = kPoolMaxSize;
  static constexpr size_t kBRPPoolSize = kPoolMaxSize;
  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize));
  static_assert(base::bits::IsPowerOfTwo(kBRPPoolSize));
#if BUILDFLAG(ENABLE_PKEYS)
  static constexpr size_t kPkeyPoolSize = kGiB / 4;
  static_assert(base::bits::IsPowerOfTwo(kPkeyPoolSize));
#endif
  static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
  static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
  static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize));
  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));

#if BUILDFLAG(IS_IOS)

#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#error iOS is only supported with a dynamically sized GigaCase.
#endif

  // We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey tests,
  // since the test process cannot use an extended virtual address space (see
  // crbug.com/1250788).
  static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
  static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
  static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
  static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess));
  static_assert(base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
#endif  // BUILDFLAG(IS_IOS)

#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  // Masks used to easy determine belonging to a pool.
  static constexpr uintptr_t kRegularPoolOffsetMask =
      static_cast<uintptr_t>(kRegularPoolSize) - 1;
  static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
  static constexpr uintptr_t kBRPPoolOffsetMask =
      static_cast<uintptr_t>(kBRPPoolSize) - 1;
  static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
#endif  // !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

#if BUILDFLAG(ENABLE_PKEYS)
  static constexpr uintptr_t kPkeyPoolOffsetMask =
      static_cast<uintptr_t>(kPkeyPoolSize) - 1;
  static constexpr uintptr_t kPkeyPoolBaseMask = ~kPkeyPoolOffsetMask;
#endif

  // This must be set to such a value that IsIn*Pool() always returns false when
  // the pool isn't initialized.
  static constexpr uintptr_t kUninitializedPoolBaseAddress =
      static_cast<uintptr_t>(-1);

  struct PoolSetup {
    // Before PartitionAddressSpace::Init(), no allocation are allocated from a
    // reserved address space. Therefore, set *_pool_base_address_ initially to
    // -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
    constexpr PoolSetup()
        : regular_pool_base_address_(kUninitializedPoolBaseAddress),
          brp_pool_base_address_(kUninitializedPoolBaseAddress),
          configurable_pool_base_address_(kUninitializedPoolBaseAddress),
#if BUILDFLAG(ENABLE_PKEYS)
          pkey_pool_base_address_(kUninitializedPoolBaseAddress),
#endif
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
          regular_pool_base_mask_(0),
          brp_pool_base_mask_(0),
#if PA_CONFIG(GLUE_CORE_POOLS)
          core_pools_base_mask_(0),
#endif
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
          configurable_pool_base_mask_(0)
#if BUILDFLAG(ENABLE_PKEYS)
          ,
          pkey_(kInvalidPkey)
#endif
    {
    }

    // Using a union to enforce padding.
    union {
      struct {
        uintptr_t regular_pool_base_address_;
        uintptr_t brp_pool_base_address_;
        uintptr_t configurable_pool_base_address_;
#if BUILDFLAG(ENABLE_PKEYS)
        uintptr_t pkey_pool_base_address_;
#endif
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
        uintptr_t regular_pool_base_mask_;
        uintptr_t brp_pool_base_mask_;
#if PA_CONFIG(GLUE_CORE_POOLS)
        uintptr_t core_pools_base_mask_;
#endif
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
        uintptr_t configurable_pool_base_mask_;
#if BUILDFLAG(ENABLE_PKEYS)
        int pkey_;
#endif
      };

#if BUILDFLAG(ENABLE_PKEYS)
      // With pkey support, we want to be able to pkey-tag all global metadata
      // which requires page granularity.
      char one_page_[SystemPageSize()];
#else
      char one_cacheline_[kPartitionCachelineSize];
#endif
    };
  };
#if BUILDFLAG(ENABLE_PKEYS)
  static_assert(sizeof(PoolSetup) % SystemPageSize() == 0,
                "PoolSetup has to fill a page(s)");
#else
  static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
                "PoolSetup has to fill a cacheline(s)");
#endif

  // See the comment describing the address layout above.
  //
  // These are write-once fields, frequently accessed thereafter. Make sure they
  // don't share a cacheline with other, potentially writeable data, through
  // alignment and padding.
#if BUILDFLAG(ENABLE_PKEYS)
  static_assert(PA_PKEY_ALIGN_SZ >= kPartitionCachelineSize);
  alignas(PA_PKEY_ALIGN_SZ)
#else
  alignas(kPartitionCachelineSize)
#endif
      static PoolSetup setup_ PA_CONSTINIT;

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  static std::ptrdiff_t regular_pool_shadow_offset_;
  static std::ptrdiff_t brp_pool_shadow_offset_;
#endif

#if BUILDFLAG(ENABLE_PKEYS)
  // If we use a pkey pool, we need to tag its metadata with the pkey. Allow the
  // function to get access to the PoolSetup.
  friend void TagGlobalsWithPkey(int pkey);
#endif
};
403 
GetPoolAndOffset(uintptr_t address)404 PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
405     uintptr_t address) {
406   return PartitionAddressSpace::GetPoolAndOffset(address);
407 }
408 
GetPool(uintptr_t address)409 PA_ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
410   return std::get<0>(GetPoolAndOffset(address));
411 }
412 
OffsetInBRPPool(uintptr_t address)413 PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
414   return PartitionAddressSpace::OffsetInBRPPool(address);
415 }
416 
417 #if PA_CONFIG(ENABLE_SHADOW_METADATA)
ShadowPoolOffset(pool_handle pool)418 PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
419   return PartitionAddressSpace::ShadowPoolOffset(pool);
420 }
421 #endif
422 
423 }  // namespace internal
424 
425 // Returns false for nullptr.
IsManagedByPartitionAlloc(uintptr_t address)426 PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
427   // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
428 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
429   PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
430 #endif
431   return internal::PartitionAddressSpace::IsInRegularPool(address)
432 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
433          || internal::PartitionAddressSpace::IsInBRPPool(address)
434 #endif
435 #if BUILDFLAG(ENABLE_PKEYS)
436          || internal::PartitionAddressSpace::IsInPkeyPool(address)
437 #endif
438          || internal::PartitionAddressSpace::IsInConfigurablePool(address);
439 }
440 
441 // Returns false for nullptr.
IsManagedByPartitionAllocRegularPool(uintptr_t address)442 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
443   return internal::PartitionAddressSpace::IsInRegularPool(address);
444 }
445 
446 // Returns false for nullptr.
IsManagedByPartitionAllocBRPPool(uintptr_t address)447 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
448   return internal::PartitionAddressSpace::IsInBRPPool(address);
449 }
450 
451 #if PA_CONFIG(GLUE_CORE_POOLS)
452 // Checks whether the address belongs to either regular or BRP pool.
453 // Returns false for nullptr.
IsManagedByPartitionAllocCorePools(uintptr_t address)454 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocCorePools(uintptr_t address) {
455   return internal::PartitionAddressSpace::IsInCorePools(address);
456 }
457 #endif  // PA_CONFIG(GLUE_CORE_POOLS)
458 
459 // Returns false for nullptr.
IsManagedByPartitionAllocConfigurablePool(uintptr_t address)460 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
461     uintptr_t address) {
462   return internal::PartitionAddressSpace::IsInConfigurablePool(address);
463 }
464 
465 #if BUILDFLAG(ENABLE_PKEYS)
466 // Returns false for nullptr.
IsManagedByPartitionAllocPkeyPool(uintptr_t address)467 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocPkeyPool(uintptr_t address) {
468   return internal::PartitionAddressSpace::IsInPkeyPool(address);
469 }
470 #endif
471 
IsConfigurablePoolAvailable()472 PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
473   return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
474 }
475 
476 }  // namespace partition_alloc
477 
478 #endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
479 
480 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
481