• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
7 
8 #include <algorithm>
9 #include <climits>
10 #include <cstddef>
11 #include <limits>
12 
13 #include "base/allocator/partition_allocator/address_pool_manager_types.h"
14 #include "base/allocator/partition_allocator/page_allocator_constants.h"
15 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
16 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
17 #include "base/allocator/partition_allocator/partition_alloc_config.h"
18 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
19 #include "base/allocator/partition_allocator/tagging.h"
20 #include "build/build_config.h"
21 
22 #if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
23 #include <mach/vm_page_size.h>
24 #endif
25 
26 namespace partition_alloc {
27 
28 // Bit flag constants used as `flag` argument of PartitionRoot::AllocWithFlags,
29 // AlignedAllocWithFlags, etc.
struct AllocFlags {
  // Return nullptr on failure instead of terminating.
  static constexpr unsigned int kReturnNull = 1 << 0;
  // Hand back zero-initialized memory.
  static constexpr unsigned int kZeroFill = 1 << 1;
  // Don't allow allocation override hooks. Override hooks are expected to
  // check for the presence of this flag and return false if it is active.
  static constexpr unsigned int kNoOverrideHooks = 1 << 2;
  // Never let a memory tool like ASan (if active) perform the allocation.
  static constexpr unsigned int kNoMemoryToolOverride = 1 << 3;
  // Don't allow any hooks (override or observers).
  static constexpr unsigned int kNoHooks = 1 << 4;  // Internal.
  // If the allocation requires a "slow path" (such as allocating/committing a
  // new slot span), return nullptr instead. Note this makes all large
  // allocations return nullptr, such as direct-mapped ones, and even for
  // smaller ones, a nullptr value is common.
  static constexpr unsigned int kFastPathOrReturnNull = 1 << 5;  // Internal.

  // Highest defined flag; lets callers validate that supplied flags are in
  // range.
  static constexpr unsigned int kLastFlag = kFastPathOrReturnNull;
};
48 
49 // Bit flag constants used as `flag` argument of PartitionRoot::FreeWithFlags.
struct FreeFlags {
  // See AllocFlags::kNoMemoryToolOverride.
  static constexpr unsigned int kNoMemoryToolOverride = 1 << 0;

  // Highest defined flag; lets callers validate that supplied flags are in
  // range.
  static constexpr unsigned int kLastFlag = kNoMemoryToolOverride;
};
56 
57 namespace internal {
58 
// Size of a cache line. Not all CPUs in the world have a 64 bytes cache line
// size, but as of 2021, most do. This is in particular the case for almost all
// x86_64 and almost all ARM CPUs supported by Chromium. As this is used for
// static alignment, we cannot query the CPU at runtime to determine the actual
// alignment, so use 64 bytes everywhere. Since this is only used to avoid false
// sharing, getting this wrong only results in lower performance, not incorrect
// code.
constexpr size_t kPartitionCachelineSize = 64;
67 
68 // Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
69 // It is typical for a `PartitionPage` to be based on multiple system pages.
70 // Most references to "page" refer to `PartitionPage`s.
71 //
72 // *Super pages* are the underlying system allocations we make. Super pages
73 // contain multiple partition pages and include space for a small amount of
74 // metadata per partition page.
75 //
// Inside super pages, we store *slot spans*. A slot span is a contiguous range
77 // of one or more `PartitionPage`s that stores allocations of the same size.
78 // Slot span sizes are adjusted depending on the allocation size, to make sure
79 // the packing does not lead to unused (wasted) space at the end of the last
80 // system page of the span. For our current maximum slot span size of 64 KiB and
81 // other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
82 // up against the end of a system page.
83 
84 #if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
85 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift()86 PartitionPageShift() {
87   return 16;  // 64 KiB
88 }
89 #elif defined(ARCH_CPU_PPC64)
90 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift()91 PartitionPageShift() {
92   return 18;  // 256 KiB
93 }
94 #elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
95     (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
96 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift()97 PartitionPageShift() {
98   return PageAllocationGranularityShift() + 2;
99 }
100 #else
101 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift()102 PartitionPageShift() {
103   return 14;  // 16 KiB
104 }
105 #endif
106 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageSize()107 PartitionPageSize() {
108   return 1 << PartitionPageShift();
109 }
110 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageOffsetMask()111 PartitionPageOffsetMask() {
112   return PartitionPageSize() - 1;
113 }
114 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageBaseMask()115 PartitionPageBaseMask() {
116   return ~PartitionPageOffsetMask();
117 }
118 
// Maximum number of `PartitionPage`s per regular slot span (the system-page
// count is derived in MaxSystemPagesPerRegularSlotSpan()). Above this limit,
// we call it a single-slot span, as the span literally hosts only one slot,
// and has somewhat different implementation. At run-time, single-slot spans
// can be differentiated with a call to CanStoreRawSize().
// TODO: Should this be 1 on platforms with page size larger than 4kB, e.g.
// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
126 
127 // To avoid fragmentation via never-used freelist entries, we hand out partition
128 // freelist sections gradually, in units of the dominant system page size. What
129 // we're actually doing is avoiding filling the full `PartitionPage` (16 KiB)
130 // with freelist pointers right away. Writing freelist pointers will fault and
131 // dirty a private page, which is very wasteful if we never actually store
132 // objects there.
133 
134 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumSystemPagesPerPartitionPage()135 NumSystemPagesPerPartitionPage() {
136   return PartitionPageSize() >> SystemPageShift();
137 }
138 
139 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MaxSystemPagesPerRegularSlotSpan()140 MaxSystemPagesPerRegularSlotSpan() {
141   return NumSystemPagesPerPartitionPage() *
142          kMaxPartitionPagesPerRegularSlotSpan;
143 }
144 
145 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MaxRegularSlotSpanSize()146 MaxRegularSlotSpanSize() {
147   return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
148 }
149 
// The maximum size that is used in an alternate bucket distribution. After this
// threshold, we only have 1 slot per slot-span, so external fragmentation
// doesn't matter. So, using the alternate bucket distribution after this
// threshold has no benefit, and only increases internal fragmentation.
//
// We would like this to be |MaxRegularSlotSpanSize()| on all platforms, but
// this is not constexpr on all platforms, so on other platforms we hardcode it,
// even though this may be too low, e.g. on systems with a page size >4KiB.
constexpr size_t kHighThresholdForAlternateDistribution =
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
    MaxRegularSlotSpanSize();
#else
    1 << 16;
#endif
164 
165 // We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
166 // These chunks are called *super pages*. We do this so that we can store
167 // metadata in the first few pages of each 2 MiB-aligned section. This makes
168 // freeing memory very fast. 2 MiB size & alignment were chosen, because this
169 // virtual address block represents a full but single page table allocation on
170 // ARM, ia32 and x64, which may be slightly more performance&memory efficient.
171 // (Note, these super pages are backed by 4 KiB system pages and have nothing to
172 // do with OS concept of "huge pages"/"large pages", even though the size
173 // coincides.)
174 //
175 // The layout of the super page is as follows. The sizes below are the same for
176 // 32- and 64-bit platforms.
177 //
178 //     +-----------------------+
179 //     | Guard page (4 KiB)    |
180 //     | Metadata page (4 KiB) |
181 //     | Guard pages (8 KiB)   |
182 //     | Free Slot Bitmap      |
183 //     | *Scan State Bitmap    |
184 //     | Slot span             |
185 //     | Slot span             |
186 //     | ...                   |
187 //     | Slot span             |
188 //     | Guard pages (16 KiB)  |
189 //     +-----------------------+
190 //
191 // Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State
192 // Bitmap is inserted for partitions that may have quarantine enabled.
193 //
194 // If refcount_at_end_allocation is enabled, RefcountBitmap(4KiB) is inserted
195 // after the Metadata page for BackupRefPtr. The guard pages after the bitmap
196 // will be 4KiB.
197 //
198 //...
199 //     | Metadata page (4 KiB) |
200 //     | RefcountBitmap (4 KiB)|
201 //     | Guard pages (4 KiB)   |
202 //...
203 //
204 // Each slot span is a contiguous range of one or more `PartitionPage`s. Note
205 // that slot spans of different sizes may co-exist with one super page. Even
206 // slot spans of the same size may support different slot sizes. However, all
207 // slots within a span have to be of the same size.
208 //
209 // The metadata page has the following format. Note that the `PartitionPage`
210 // that is not at the head of a slot span is "unused" (by most part, it only
211 // stores the offset from the head page). In other words, the metadata for the
212 // slot span is stored only in the first `PartitionPage` of the slot span.
213 // Metadata accesses to other `PartitionPage`s are redirected to the first
214 // `PartitionPage`.
215 //
216 //     +---------------------------------------------+
217 //     | SuperPageExtentEntry (32 B)                 |
218 //     | PartitionPage of slot span 1 (32 B, used)   |
219 //     | PartitionPage of slot span 1 (32 B, unused) |
220 //     | PartitionPage of slot span 1 (32 B, unused) |
221 //     | PartitionPage of slot span 2 (32 B, used)   |
222 //     | PartitionPage of slot span 3 (32 B, used)   |
223 //     | ...                                         |
224 //     | PartitionPage of slot span N (32 B, used)   |
225 //     | PartitionPage of slot span N (32 B, unused) |
226 //     | PartitionPage of slot span N (32 B, unused) |
227 //     +---------------------------------------------+
228 //
229 // A direct-mapped page has an identical layout at the beginning to fake it
230 // looking like a super page:
231 //
232 //     +---------------------------------+
233 //     | Guard page (4 KiB)              |
234 //     | Metadata page (4 KiB)           |
235 //     | Guard pages (8 KiB)             |
236 //     | Direct mapped object            |
237 //     | Guard page (4 KiB, 32-bit only) |
238 //     +---------------------------------+
239 //
240 // A direct-mapped page's metadata page has the following layout (on 64 bit
241 // architectures. On 32 bit ones, the layout is identical, some sizes are
242 // different due to smaller pointers.):
243 //
244 //     +----------------------------------+
245 //     | SuperPageExtentEntry (32 B)      |
246 //     | PartitionPage (32 B)             |
247 //     | PartitionBucket (40 B)           |
248 //     | PartitionDirectMapExtent (32 B)  |
249 //     +----------------------------------+
250 //
251 // See |PartitionDirectMapMetadata| for details.
252 
// A *super page* is PartitionAlloc's 2 MiB, 2 MiB-aligned reservation granule
// (see the layout description above).
constexpr size_t kGiB = 1024 * 1024 * 1024ull;
constexpr size_t kSuperPageShift = 21;  // 2 MiB
constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
constexpr size_t kSuperPageAlignment = kSuperPageSize;
// Low bits: offset within a super page.
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
// High bits: base address of the containing super page.
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
259 
260 // PartitionAlloc's address space is split into pools. See `glossary.md`.
261 
262 enum pool_handle : unsigned {
263   kNullPoolHandle = 0u,
264 
265   kRegularPoolHandle,
266   kBRPPoolHandle,
267 #if BUILDFLAG(HAS_64_BIT_POINTERS)
268   kConfigurablePoolHandle,
269 #endif
270 
271 // New pool_handles will be added here.
272 
273 #if BUILDFLAG(ENABLE_PKEYS)
274   // The pkey pool must come last since we pkey_mprotect its entry in the
275   // metadata tables, e.g. AddressPoolManager::aligned_pools_
276   kPkeyPoolHandle,
277 #endif
278   kMaxPoolHandle
279 };
280 
281 // kNullPoolHandle doesn't have metadata, hence - 1
282 constexpr size_t kNumPools = kMaxPoolHandle - 1;
283 
284 // Maximum pool size. With exception of Configurable Pool, it is also
285 // the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
286 // allows to choose a different size at initialization time for certain
287 // configurations.
288 //
289 // Special-case Android and iOS, which incur test failures with larger
290 // pools. Regardless, allocating >8GiB with malloc() on these platforms is
291 // unrealistic as of 2022.
292 //
293 // When pointer compression is enabled, we cannot use large pools (at most
294 // 8GB for each of the glued pools).
295 #if BUILDFLAG(HAS_64_BIT_POINTERS)
296 #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || PA_CONFIG(POINTER_COMPRESSION)
297 constexpr size_t kPoolMaxSize = 8 * kGiB;
298 #else
299 constexpr size_t kPoolMaxSize = 16 * kGiB;
300 #endif
301 #else  // BUILDFLAG(HAS_64_BIT_POINTERS)
302 constexpr size_t kPoolMaxSize = 4 * kGiB;
303 #endif
304 constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
305 
306 #if BUILDFLAG(ENABLE_PKEYS)
307 static_assert(
308     kPkeyPoolHandle == kNumPools,
309     "The pkey pool must come last since we pkey_mprotect its metadata.");
310 #endif
311 
// Slots larger than this size will not receive MTE protection. Pages intended
// for allocations larger than this constant should not be backed with PROT_MTE
// (which saves shadow tag memory). We also save CPU cycles by skipping tagging
// of large areas which are less likely to benefit from MTE protection.
// TODO(Richard.Townsend@arm.com): adjust RecommitSystemPagesForData to skip
// PROT_MTE.
constexpr size_t kMaxMemoryTaggingSize = 1024;
319 
320 #if PA_CONFIG(HAS_MEMORY_TAGGING)
321 // Returns whether the tag of |object| overflowed, meaning the containing slot
322 // needs to be moved to quarantine.
HasOverflowTag(void * object)323 PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
324   // The tag with which the slot is put to quarantine.
325   constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
326   static_assert((kOverflowTag & kPtrTagMask) != 0,
327                 "Overflow tag must be in tag bits");
328   return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
329 }
330 #endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
331 
332 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumPartitionPagesPerSuperPage()333 NumPartitionPagesPerSuperPage() {
334   return kSuperPageSize >> PartitionPageShift();
335 }
336 
MaxSuperPagesInPool()337 PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() {
338   return kMaxSuperPagesInPool;
339 }
340 
341 #if BUILDFLAG(HAS_64_BIT_POINTERS)
342 // In 64-bit mode, the direct map allocation granularity is super page size,
343 // because this is the reservation granularity of the pools.
DirectMapAllocationGranularity()344 PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() {
345   return kSuperPageSize;
346 }
347 
DirectMapAllocationGranularityShift()348 PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
349   return kSuperPageShift;
350 }
351 #else   // BUILDFLAG(HAS_64_BIT_POINTERS)
352 // In 32-bit mode, address space is space is a scarce resource. Use the system
353 // allocation granularity, which is the lowest possible address space allocation
354 // unit. However, don't go below partition page size, so that pool bitmaps
355 // don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
356 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularity()357 DirectMapAllocationGranularity() {
358   return std::max(PageAllocationGranularity(), PartitionPageSize());
359 }
360 
361 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularityShift()362 DirectMapAllocationGranularityShift() {
363   return std::max(PageAllocationGranularityShift(), PartitionPageShift());
364 }
365 #endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
366 
367 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularityOffsetMask()368 DirectMapAllocationGranularityOffsetMask() {
369   return DirectMapAllocationGranularity() - 1;
370 }
371 
372 // The "order" of an allocation is closely related to the power-of-1 size of the
373 // allocation. More precisely, the order is the bit index of the
374 // most-significant-bit in the allocation size, where the bit numbers starts at
375 // index 1 for the least-significant-bit.
376 //
377 // In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
378 // covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
379 
380 // PartitionAlloc should return memory properly aligned for any type, to behave
381 // properly as a generic allocator. This is not strictly required as long as
382 // types are explicitly allocated with PartitionAlloc, but is to use it as a
383 // malloc() implementation, and generally to match malloc()'s behavior.
384 //
385 // In practice, this means 8 bytes alignment on 32 bit architectures, and 16
386 // bytes on 64 bit ones.
387 //
388 // Keep in sync with //tools/memory/partition_allocator/objects_per_size_py.
389 constexpr size_t kMinBucketedOrder =
390     kAlignment == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
391 // The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
392 constexpr size_t kMaxBucketedOrder = 20;
393 constexpr size_t kNumBucketedOrders =
394     (kMaxBucketedOrder - kMinBucketedOrder) + 1;
395 // 8 buckets per order (for the higher orders).
396 // Note: this is not what is used by default, but the maximum amount of buckets
397 // per order. By default, only 4 are used.
398 constexpr size_t kNumBucketsPerOrderBits = 3;
399 constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
400 constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
401 constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
402 constexpr size_t kMaxBucketSpacing =
403     1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
404 constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
405                                 ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
406 // Limit when downsizing a direct mapping using `realloc`:
407 constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
408 // Intentionally set to less than 2GiB to make sure that a 2GiB allocation
409 // fails. This is a security choice in Chrome, to help making size_t vs int bugs
410 // harder to exploit.
411 
412 // The definition of MaxDirectMapped does only depend on constants that are
413 // unconditionally constexpr. Therefore it is not necessary to use
414 // PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
MaxDirectMapped()415 PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
416   // Subtract kSuperPageSize to accommodate for granularity inside
417   // PartitionRoot::GetDirectMapReservationSize.
418   return (1UL << 31) - kSuperPageSize;
419 }
420 
421 // Max alignment supported by AlignedAllocWithFlags().
422 // kSuperPageSize alignment can't be easily supported, because each super page
423 // starts with guard pages & metadata.
424 constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;
425 
// Number of bits in a size_t. NOTE(review): computed from sizeof(void*),
// which presumes sizeof(void*) == sizeof(size_t) on all supported targets —
// confirm before porting to an exotic platform.
constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
427 
// When a SlotSpan becomes empty, the allocator tries to avoid re-using it
// immediately, to help with fragmentation. At this point, it becomes dirty
// committed memory, which we want to minimize. This could be decommitted
// immediately, but that would imply doing a lot of system calls. In particular,
// for single-slot SlotSpans, a malloc() / free() loop would cause a *lot* of
// system calls.
//
// As an intermediate step, empty SlotSpans are placed into a per-partition
// global ring buffer, giving the newly-empty SlotSpan a chance to be re-used
// before getting decommitted. A new entry (i.e. a newly empty SlotSpan) taking
// the place used by a previous one will lead the previous SlotSpan to be
// decommitted immediately, provided that it is still empty.
//
// Setting this value higher means giving more time for reuse to happen, at the
// cost of possibly increasing peak committed memory usage (and increasing the
// size of PartitionRoot a bit, since the ring buffer is there). Note that the
// ring buffer doesn't necessarily contain an empty SlotSpan, as SlotSpans are
// *not* removed from it when re-used. So the ring buffer really is a buffer of
// *possibly* empty SlotSpans.
//
// In all cases, PartitionRoot::PurgeMemory() with the
// PurgeFlags::kDecommitEmptySlotSpans flag will eagerly decommit all entries
// in the ring buffer, so with periodic purge enabled, this typically happens
// every few seconds.
constexpr size_t kEmptyCacheIndexBits = 7;
// kMaxFreeableSpans is the buffer size, but is never used as an index value,
// hence <= is appropriate.
constexpr size_t kMaxFreeableSpans = 1 << kEmptyCacheIndexBits;
// Default (smaller) ring size actually used unless enlarged at runtime.
constexpr size_t kDefaultEmptySlotSpanRingSize = 16;
457 
// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably it is a "out of virtual address space" crash), a special
// crash stack trace is generated at
// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
// of virtual address space" from "out of physical memory" in crash reports.
constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB
464 
// These byte values match tcmalloc.
constexpr unsigned char kUninitializedByte = 0xAB;
constexpr unsigned char kFreedByte = 0xCD;

constexpr unsigned char kQuarantinedByte = 0xEF;

// 1 is smaller than anything we can use, as it is not properly aligned. Not
// using a large size, since PartitionBucket::slot_size is a uint32_t, and
// static_cast<uint32_t>(-1) is too close to a "real" size.
constexpr size_t kInvalidBucketSize = 1;
475 
476 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
477 // Requested size that require the hack.
478 constexpr size_t kMac11MallocSizeHackRequestedSize = 32;
479 // Usable size for allocations that require the hack.
480 constexpr size_t kMac11MallocSizeHackUsableSize =
481 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) || \
482     PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) || \
483     PA_CONFIG(REF_COUNT_CHECK_COOKIE)
484     40;
485 #else
486     44;
487 #endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) ||
488         // PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) ||
489         // PA_CONFIG(REF_COUNT_CHECK_COOKIE)
490 #endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
491 }  // namespace internal
492 
493 // These constants are used outside PartitionAlloc itself, so we provide
494 // non-internal aliases here.
495 using ::partition_alloc::internal::kInvalidBucketSize;
496 using ::partition_alloc::internal::kMaxSuperPagesInPool;
497 using ::partition_alloc::internal::kMaxSupportedAlignment;
498 using ::partition_alloc::internal::kNumBuckets;
499 using ::partition_alloc::internal::kSuperPageSize;
500 using ::partition_alloc::internal::MaxDirectMapped;
501 using ::partition_alloc::internal::PartitionPageSize;
502 
503 }  // namespace partition_alloc
504 
505 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
506