// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_

#include <algorithm>
#include <climits>
#include <cstddef>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/flags.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_forward.h"

#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#include <mach/vm_page_size.h>
#endif

#if PA_CONFIG(HAS_MEMORY_TAGGING)
#include "partition_alloc/tagging.h"
#endif

namespace partition_alloc {

namespace internal {
// Bit flag constants used as the `flags` argument of
// PartitionRoot::Alloc<flags>, AlignedAlloc, etc.
enum class AllocFlags {
  kNone = 0,
  kReturnNull = 1 << 0,
  kZeroFill = 1 << 1,
  // Don't allow allocation override hooks. Override hooks are expected to
  // check for the presence of this flag and return false if it is active.
  kNoOverrideHooks = 1 << 2,
  // Never let a memory tool like ASan (if active) perform the allocation.
  kNoMemoryToolOverride = 1 << 3,
  // Don't allow any hooks (override or observers).
  kNoHooks = 1 << 4,  // Internal.
  // If the allocation requires a "slow path" (such as allocating/committing a
  // new slot span), return nullptr instead. Note that this makes all large
  // allocations, such as direct-mapped ones, return nullptr, and that even
  // for smaller ones, a nullptr result is common.
  kFastPathOrReturnNull = 1 << 5,  // Internal.
  // An allocation override hook should tag the allocated memory for MTE.
  kMemoryShouldBeTaggedForMte = 1 << 6,  // Internal.
  kMaxValue = kMemoryShouldBeTaggedForMte,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(AllocFlags);
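
// The PA_DEFINE_OPERATORS_FOR_FLAGS macro above gives the enum bitwise
// operators, so flags can be combined at the call site. A hypothetical sketch
// (the argument list is elided, since it is not declared in this header):
//
//   void* p =
//       root->Alloc<AllocFlags::kReturnNull | AllocFlags::kZeroFill>(...);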

// Bit flag constants used as the `flags` argument of
// PartitionRoot::Free<flags>.
enum class FreeFlags {
  kNone = 0,
  // See AllocFlags::kNoMemoryToolOverride.
  kNoMemoryToolOverride = 1 << 0,
  // Don't allow any hooks (override or observers).
  kNoHooks = 1 << 1,  // Internal.
  // Quarantine for a while to ensure no UaF from on-stack pointers.
  kSchedulerLoopQuarantine = 1 << 2,
  // Zap the object region on `Free()`.
  kZap = 1 << 3,
  kMaxValue = kZap,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(FreeFlags);
}  // namespace internal

using internal::AllocFlags;
using internal::FreeFlags;

namespace internal {

// Size of a cache line. Not all CPUs in the world have a 64-byte cache line
// size, but as of 2021, most do. This is in particular the case for almost all
// x86_64 and almost all ARM CPUs supported by Chromium. As this is used for
// static alignment, we cannot query the CPU at runtime to determine the actual
// alignment, so we use 64 bytes everywhere. Since this is only used to avoid
// false sharing, getting this wrong only results in lower performance, not
// incorrect code.
constexpr size_t kPartitionCachelineSize = 64;
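
// As a hypothetical illustration of the "static alignment" use mentioned
// above (the struct and its members are made up, not PartitionAlloc code):
//
//   struct Counters {
//     alignas(kPartitionCachelineSize) std::atomic<size_t> allocs;
//     alignas(kPartitionCachelineSize) std::atomic<size_t> frees;
//   };
//
// This keeps the two hot counters on separate cache lines, avoiding false
// sharing between threads that update them concurrently.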

// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
// It is typical for a `PartitionPage` to be based on multiple system pages.
// Most references to "page" refer to `PartitionPage`s.
//
// *Super pages* are the underlying system allocations we make. Super pages
// contain multiple partition pages and include space for a small amount of
// metadata per partition page.
//
// Inside super pages, we store *slot spans*. A slot span is a contiguous range
// of one or more `PartitionPage`s that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current maximum slot span size of 64 KiB
// and other constant values, we pack _all_ `PartitionRoot::Alloc` sizes
// perfectly up against the end of a system page.

#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  return 16;  // 64 KiB
}
#elif defined(ARCH_CPU_PPC64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  return 18;  // 256 KiB
}
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  return PageAllocationGranularityShift() + 2;
}
#else
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  return 14;  // 16 KiB
}
#endif
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageSize() {
  return 1 << PartitionPageShift();
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageOffsetMask() {
  return PartitionPageSize() - 1;
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageBaseMask() {
  return ~PartitionPageOffsetMask();
}
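
// Worked example, assuming the default 14-bit shift above (16 KiB partition
// pages): PartitionPageSize() == 0x4000, PartitionPageOffsetMask() == 0x3fff,
// and PartitionPageBaseMask() == ~0x3fff, so `address &
// PartitionPageBaseMask()` rounds an address down to the start of its
// partition page.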

// Number of system pages per regular slot span. Above this limit, we call it
// a single-slot span, as the span literally hosts only one slot, and has a
// somewhat different implementation. At run-time, single-slot spans can be
// differentiated with a call to CanStoreRawSize().
// TODO: Should this be 1 on platforms with page size larger than 4 KiB, e.g.
// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;

// To avoid fragmentation via never-used freelist entries, we hand out
// partition freelist sections gradually, in units of the dominant system page
// size. What we're actually doing is avoiding filling the full
// `PartitionPage` (16 KiB) with freelist pointers right away. Writing
// freelist pointers will fault and dirty a private page, which is very
// wasteful if we never actually store objects there.

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumSystemPagesPerPartitionPage() {
  return PartitionPageSize() >> SystemPageShift();
}

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MaxSystemPagesPerRegularSlotSpan() {
  return NumSystemPagesPerPartitionPage() *
         kMaxPartitionPagesPerRegularSlotSpan;
}

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MaxRegularSlotSpanSize() {
  return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
}
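
// Worked example, again assuming 16 KiB partition pages, plus 4 KiB system
// pages: NumSystemPagesPerPartitionPage() == 4,
// MaxSystemPagesPerRegularSlotSpan() == 16, and MaxRegularSlotSpanSize() ==
// 4 * 16 KiB == 64 KiB, the maximum slot span size mentioned at the top of
// this file.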

// The maximum size that is used in an alternate bucket distribution. After
// this threshold, we only have 1 slot per slot span, so external
// fragmentation doesn't matter. Thus, using the alternate bucket distribution
// after this threshold has no benefit, and only increases internal
// fragmentation.
//
// We would like this to be |MaxRegularSlotSpanSize()| on all platforms, but
// it is not constexpr everywhere, so on those platforms we hardcode it, even
// though this may be too low, e.g. on systems with a page size >4 KiB.
constexpr size_t kHighThresholdForAlternateDistribution =
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
    MaxRegularSlotSpanSize();
#else
    1 << 16;
#endif

// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as
// well). These chunks are called *super pages*. We do this so that we can
// store metadata in the first few pages of each 2 MiB-aligned section. This
// makes freeing memory very fast. The 2 MiB size and alignment were chosen
// because this virtual address block represents a full, single page table
// allocation on ARM, ia32 and x64, which may be slightly more performance-
// and memory-efficient. (Note, these super pages are backed by 4 KiB system
// pages and have nothing to do with the OS concept of "huge pages"/"large
// pages", even though the size coincides.)
//
// The layout of the super page is as follows. The sizes below are the same
// for 32- and 64-bit platforms.
//
//     +-----------------------+
//     | Guard page (4 KiB)    |
//     | Metadata page (4 KiB) |
//     | Guard pages (8 KiB)   |
//     | Free Slot Bitmap      |
//     | *Scan State Bitmap    |
//     | Slot span             |
//     | Slot span             |
//     | ...                   |
//     | Slot span             |
//     | Guard pages (16 KiB)  |
//     +-----------------------+
//
// The Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. The
// State Bitmap is inserted for partitions that may have quarantine enabled.
//
// If refcount_at_end_allocation is enabled, a RefcountBitmap (4 KiB) is
// inserted after the Metadata page for BackupRefPtr. The guard pages after
// the bitmap will be 4 KiB.
//
// ...
//     | Metadata page (4 KiB) |
//     | RefcountBitmap (4 KiB)|
//     | Guard pages (4 KiB)   |
// ...
//
// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
// that slot spans of different sizes may co-exist within one super page. Even
// slot spans of the same size may support different slot sizes. However, all
// slots within a span have to be of the same size.
//
// The metadata page has the following format. Note that a `PartitionPage`
// that is not at the head of a slot span is "unused" (for the most part; it
// only stores the offset from the head page). In other words, the metadata
// for the slot span is stored only in the first `PartitionPage` of the slot
// span. Metadata accesses to other `PartitionPage`s are redirected to the
// first `PartitionPage`.
//
//     +---------------------------------------------+
//     | SuperPageExtentEntry (32 B)                 |
//     | PartitionPage of slot span 1 (32 B, used)   |
//     | PartitionPage of slot span 1 (32 B, unused) |
//     | PartitionPage of slot span 1 (32 B, unused) |
//     | PartitionPage of slot span 2 (32 B, used)   |
//     | PartitionPage of slot span 3 (32 B, used)   |
//     | ...                                         |
//     | PartitionPage of slot span N (32 B, used)   |
//     | PartitionPage of slot span N (32 B, unused) |
//     | PartitionPage of slot span N (32 B, unused) |
//     +---------------------------------------------+
//
// A direct-mapped page has an identical layout at the beginning to make it
// look like a super page:
//
//     +---------------------------------+
//     | Guard page (4 KiB)              |
//     | Metadata page (4 KiB)           |
//     | Guard pages (8 KiB)             |
//     | Direct mapped object            |
//     | Guard page (4 KiB, 32-bit only) |
//     +---------------------------------+
//
// A direct-mapped page's metadata page has the following layout (on 64-bit
// architectures; on 32-bit ones, the layout is identical, but some sizes
// differ due to smaller pointers):
//
//     +----------------------------------+
//     | SuperPageExtentEntry (32 B)      |
//     | PartitionPage (32 B)             |
//     | PartitionBucket (40 B)           |
//     | PartitionDirectMapExtent (32 B)  |
//     +----------------------------------+
//
// See |PartitionDirectMapMetadata| for details.

constexpr size_t kGiB = 1024 * 1024 * 1024ull;
constexpr size_t kSuperPageShift = 21;  // 2 MiB
constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
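
// Sanity checks of the super page arithmetic above: 1 << 21 is 2 MiB, and
// the low 21 bits of an address are its offset within the super page.
static_assert(kSuperPageSize == 2 * 1024 * 1024, "Super pages are 2 MiB");
static_assert(kSuperPageOffsetMask == 0x1fffff,
              "The offset mask covers the low 21 bits");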

// PartitionAlloc's address space is split into pools. See `glossary.md`.

enum pool_handle : unsigned {
  kNullPoolHandle = 0u,

  kRegularPoolHandle,
  kBRPPoolHandle,
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  kConfigurablePoolHandle,
#endif

// New pool_handles will be added here.

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // The thread isolated pool must come last since we write-protect its entry
  // in the metadata tables, e.g. AddressPoolManager::aligned_pools_
  kThreadIsolatedPoolHandle,
#endif
  kMaxPoolHandle
};

// kNullPoolHandle doesn't have metadata, hence the - 1.
constexpr size_t kNumPools = kMaxPoolHandle - 1;

// Maximum pool size. With the exception of the Configurable Pool, it is also
// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
// allows choosing a different size at initialization time for certain
// configurations.
//
// Special-case Android and iOS, which incur test failures with larger
// pools. Regardless, allocating >8 GiB with malloc() on these platforms is
// unrealistic as of 2022.
//
// When pointer compression is enabled, we cannot use large pools (at most
// 8 GiB for each of the glued pools).
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || \
    BUILDFLAG(ENABLE_POINTER_COMPRESSION)
constexpr size_t kPoolMaxSize = 8 * kGiB;
#else
constexpr size_t kPoolMaxSize = 16 * kGiB;
#endif
#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif
constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
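
// For example, with the 16 GiB maximum pool size above, kMaxSuperPagesInPool
// is 16 GiB / 2 MiB == 8192.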

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
static_assert(kThreadIsolatedPoolHandle == kNumPools,
              "The thread isolated pool must come last since we write-protect "
              "its metadata.");
#endif

// Slots larger than this size will not receive MTE protection. Pages intended
// for allocations larger than this constant should not be backed with PROT_MTE
// (which saves shadow tag memory). We also save CPU cycles by skipping tagging
// of large areas which are less likely to benefit from MTE protection.
constexpr size_t kMaxMemoryTaggingSize = 1024;

#if PA_CONFIG(HAS_MEMORY_TAGGING)
// Returns whether the tag of |object| overflowed, meaning the containing slot
// needs to be moved to quarantine.
PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
  // The tag with which the slot is put to quarantine.
  constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
  static_assert((kOverflowTag & kPtrTagMask) != 0,
                "Overflow tag must be in tag bits");
  return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
}
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumPartitionPagesPerSuperPage() {
  return kSuperPageSize >> PartitionPageShift();
}

PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() {
  return kMaxSuperPagesInPool;
}

#if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, the direct map allocation granularity is the super page
// size, because this is the reservation granularity of the pools.
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() {
  return kSuperPageSize;
}

PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
  return kSuperPageShift;
}
#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space
// allocation unit. However, don't go below partition page size, so that pool
// bitmaps don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularity() {
  return std::max(PageAllocationGranularity(), PartitionPageSize());
}

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularityShift() {
  return std::max(PageAllocationGranularityShift(), PartitionPageShift());
}
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularityOffsetMask() {
  return DirectMapAllocationGranularity() - 1;
}

// The "order" of an allocation is closely related to the power-of-2 size of
// the allocation. More precisely, the order is the bit index of the
// most-significant bit in the allocation size, where bit numbering starts at
// index 1 for the least-significant bit.
//
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
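
// For example, a request of 24 bytes (0b11000) has its most-significant bit
// at index 5 (counting from 1 at the least-significant bit), so it is an
// order-5 allocation; order 5 covers sizes 16->31.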

// PartitionAlloc should return memory properly aligned for any type, to
// behave properly as a generic allocator. This is not strictly required as
// long as types are explicitly allocated with PartitionAlloc, but is required
// to use it as a malloc() implementation, and generally to match malloc()'s
// behavior.
//
// In practice, this means 8-byte alignment on 32-bit architectures, and
// 16-byte alignment on 64-bit ones.
//
// Keep in sync with //tools/memory/partition_allocator/objects_per_size.py.
constexpr size_t kMinBucketedOrder =
    kAlignment == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
constexpr size_t kMaxBucketedOrder = 20;
constexpr size_t kNumBucketedOrders =
    (kMaxBucketedOrder - kMinBucketedOrder) + 1;
// 8 buckets per order (for the higher orders).
// Note: this is not what is used by default, but the maximum number of
// buckets per order. By default, only 4 are used.
constexpr size_t kNumBucketsPerOrderBits = 3;
constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
constexpr size_t kMaxBucketSpacing =
    1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
                                ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
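
// Worked arithmetic for the two constants above: with kMaxBucketedOrder == 20
// and kNumBucketsPerOrderBits == 3, the bucket spacing in the top order is
// 1 << 16 == 64 KiB, and the largest bucketed size is
// 512 KiB + 7 * 64 KiB == 960 KiB.
static_assert(kMaxBucketSpacing == (1 << 16),
              "64 KiB bucket spacing in the top order");
static_assert(kMaxBucketed == 983040, "The largest bucketed size is 960 KiB");
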
// Limit when downsizing a direct mapping using `realloc`:
constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// Intentionally set to less than 2 GiB to make sure that a 2 GiB allocation
// fails. This is a security choice in Chrome, to help make size_t vs. int
// bugs harder to exploit.

// The definition of MaxDirectMapped only depends on constants that are
// unconditionally constexpr. Therefore it is not necessary to use
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
  // Subtract kSuperPageSize to accommodate the granularity inside
  // PartitionRoot::GetDirectMapReservationSize.
  return (1UL << 31) - kSuperPageSize;
}
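
// That is, direct maps are capped just below 2 GiB: 2 GiB - 2 MiB.
static_assert(MaxDirectMapped() == 2 * kGiB - kSuperPageSize,
              "Direct maps are capped just below 2 GiB");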

// Max alignment supported by AlignedAlloc().
// kSuperPageSize alignment can't be easily supported, because each super page
// starts with guard pages & metadata.
constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;

constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;

// When a SlotSpan becomes empty, the allocator tries to avoid re-using it
// immediately, to help with fragmentation. At this point, it becomes dirty
// committed memory, which we want to minimize. This could be decommitted
// immediately, but that would imply doing a lot of system calls. In
// particular, for single-slot SlotSpans, a malloc() / free() loop would cause
// a *lot* of system calls.
//
// As an intermediate step, empty SlotSpans are placed into a per-partition
// global ring buffer, giving the newly-empty SlotSpan a chance to be re-used
// before getting decommitted. A new entry (i.e. a newly empty SlotSpan) taking
// the place used by a previous one will lead the previous SlotSpan to be
// decommitted immediately, provided that it is still empty.
//
// Setting this value higher means giving more time for reuse to happen, at the
// cost of possibly increasing peak committed memory usage (and increasing the
// size of PartitionRoot a bit, since the ring buffer is there). Note that the
// ring buffer doesn't necessarily contain an empty SlotSpan, as SlotSpans are
// *not* removed from it when re-used. So the ring buffer really is a buffer of
// *possibly* empty SlotSpans.
//
// In all cases, PartitionRoot::PurgeMemory() with the
// PurgeFlags::kDecommitEmptySlotSpans flag will eagerly decommit all entries
// in the ring buffer, so with periodic purge enabled, this typically happens
// every few seconds.
constexpr size_t kEmptyCacheIndexBits = 7;
// kMaxFreeableSpans is the buffer size, but is never used as an index value,
// hence <= is appropriate.
constexpr size_t kMaxFreeableSpans = 1 << kEmptyCacheIndexBits;
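
// With kEmptyCacheIndexBits == 7, the ring buffer holds up to 1 << 7 == 128
// (possibly empty) SlotSpans.
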
constexpr size_t kDefaultEmptySlotSpanRingSize = 16;

// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably indicating an "out of virtual address space" condition), a
// special crash stack trace is generated at
// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
// of virtual address space" from "out of physical memory" in crash reports.
constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB

// These byte values match tcmalloc.
constexpr unsigned char kUninitializedByte = 0xAB;
constexpr unsigned char kFreedByte = 0xCD;

constexpr unsigned char kQuarantinedByte = 0xEF;

// 1 is smaller than anything we can use, as it is not properly aligned. Not
// using a large size, since PartitionBucket::slot_size is a uint32_t, and
// static_cast<uint32_t>(-1) is too close to a "real" size.
constexpr size_t kInvalidBucketSize = 1;

#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
// Requested size that requires the hack.
constexpr size_t kMac11MallocSizeHackRequestedSize = 32;
#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
}  // namespace internal

// These constants are used outside PartitionAlloc itself, so we provide
// non-internal aliases here.
using ::partition_alloc::internal::kInvalidBucketSize;
using ::partition_alloc::internal::kMaxSuperPagesInPool;
using ::partition_alloc::internal::kMaxSupportedAlignment;
using ::partition_alloc::internal::kNumBuckets;
using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::PartitionPageSize;

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_