// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_

#include <stddef.h>

#include <type_traits>

#include "build/build_config.h"
#include "partition_alloc/chromeos_buildflags.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/tagging.h"

#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#include "partition_alloc/address_pool_manager_bitmap.h"
#endif

#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#error "Included under wrong build option"
#endif

namespace base::internal {

#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_COMPONENT_EXPORT(RAW_PTR)
void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address);
#endif

class BackupRefPtrGlobalSettings {
 public:
  static void EnableExperimentalAsh() {
    PA_CHECK(!experimental_ash_raw_ptr_enabled_);
    experimental_ash_raw_ptr_enabled_ = true;
  }

  static void DisableExperimentalAshForTest() {
    PA_CHECK(experimental_ash_raw_ptr_enabled_);
    experimental_ash_raw_ptr_enabled_ = false;
  }

  PA_ALWAYS_INLINE static bool IsExperimentalAshEnabled() {
    return experimental_ash_raw_ptr_enabled_;
  }

 private:
  // Write-once settings that should live in their own cacheline, as they're
  // accessed frequently on a hot path.
  PA_ALIGNAS(partition_alloc::internal::kPartitionCachelineSize)
  static inline bool experimental_ash_raw_ptr_enabled_ = false;
  [[maybe_unused]] char
      padding_[partition_alloc::internal::kPartitionCachelineSize - 1];
};
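
// Example usage (an illustrative sketch, not part of the original file; the
// call sites are hypothetical). The flag is write-once and must be flipped
// before the BRP partition is created; reads are a single aligned load:
//
//   // Early in process startup, before any BRP partition exists:
//   base::internal::BackupRefPtrGlobalSettings::EnableExperimentalAsh();
//
//   // Later, on a hot path:
//   if (base::internal::BackupRefPtrGlobalSettings::
//           IsExperimentalAshEnabled()) {
//     // Treat ExperimentalAsh-annotated raw_ptrs as BRP-protected.
//   }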

// Note that `RawPtrBackupRefImpl` itself is not thread-safe. If multiple
// threads modify the same raw_ptr object without synchronization, a data race
// will occur.
template <bool AllowDangling = false, bool ExperimentalAsh = false>
struct RawPtrBackupRefImpl {
  // These are needed for correctness, or else we may end up manipulating the
  // ref-count where we shouldn't, thus compromising BRP's integrity. Unlike
  // the first two, kMustZeroOnDestruct wouldn't be needed if raw_ptr were used
  // correctly, but we already caught cases where a value is written after
  // destruction.
  static constexpr bool kMustZeroOnConstruct = true;
  static constexpr bool kMustZeroOnMove = true;
  static constexpr bool kMustZeroOnDestruct = true;

 private:
  PA_ALWAYS_INLINE static bool UseBrp(uintptr_t address) {
    // Pointers annotated with ExperimentalAsh are subject to a separate,
    // Ash-related experiment.
    //
    // Note that this can be enabled only before the BRP partition is created,
    // so it's impossible for this function to change its answer for a specific
    // pointer. (This relies on the original partition not being BRP-enabled.)
    if constexpr (ExperimentalAsh) {
#if BUILDFLAG(PA_IS_CHROMEOS_ASH)
      if (!BackupRefPtrGlobalSettings::IsExperimentalAshEnabled()) {
        return false;
      }
#endif
    }
    return partition_alloc::IsManagedByPartitionAllocBRPPool(address);
  }

  PA_ALWAYS_INLINE static bool IsSupportedAndNotNull(uintptr_t address) {
    // There are many situations where the compiler can prove that
    // `ReleaseWrappedPtr` is called on a value that is always nullptr, but the
    // way `IsManagedByPartitionAllocBRPPool` is written, the compiler can't
    // prove that nullptr is not managed by PartitionAlloc, so the compiler
    // has to emit a useless check and dead code. To avoid that without making
    // the runtime check slower, tell the compiler to skip
    // `IsManagedByPartitionAllocBRPPool` when it can statically determine that
    // `address` is null.
#if PA_HAS_BUILTIN(__builtin_constant_p)
    if (__builtin_constant_p(address == 0) && (address == 0)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      PA_BASE_CHECK(
          !partition_alloc::IsManagedByPartitionAllocBRPPool(address));
#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      return false;
    }
#endif  // PA_HAS_BUILTIN(__builtin_constant_p)

    // This covers the nullptr case, as address 0 is never in any
    // PartitionAlloc pool.
    bool use_brp = UseBrp(address);

    // There may be pointers immediately after the allocation, e.g.
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i++) { ptr++; }
    //   }
    //
    // Such pointers are *not* at risk of accidentally falling into the BRP
    // pool, because:
    // 1) On 64-bit systems, the BRP pool is preceded by a forbidden region.
    // 2) On 32-bit systems, the guard pages and metadata of super pages in the
    //    BRP pool aren't considered to be part of that pool.
    //
    // This allows us to make a stronger assertion: if
    // IsManagedByPartitionAllocBRPPool returns true for a valid pointer, it
    // must be at least a partition page away from the beginning of a super
    // page.
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    if (use_brp) {
      CheckThatAddressIsntWithinFirstPartitionPage(address);
    }
#endif

    return use_brp;
  }
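
  // Illustrative folding sketch (hypothetical caller, not part of the
  // original file): when inlining reduces the address to a literal zero, e.g.
  //
  //   raw_ptr<T> p;  // null-initialized and never reassigned;
  //                  // ~raw_ptr() ends up calling ReleaseWrappedPtr(nullptr)
  //
  // `__builtin_constant_p(address == 0)` evaluates to true at compile time,
  // so the early-return branch above lets the optimizer drop the
  // `IsManagedByPartitionAllocBRPPool()` call and its dead code entirely.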

#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
  // The Out-Of-Bounds (OOB) poison bit is set when the pointer has overflowed
  // by one byte.
#if defined(ARCH_CPU_X86_64)
  // Bit 63 is the only pointer bit that will work as the poison bit across
  // both LAM48 and LAM57. It also works when all unused linear address bits
  // are checked for canonicality.
  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 63;
#else
  // Avoid ARM's Top-Byte Ignore.
  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 55;
#endif

  template <typename T>
  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
                                ~OOB_POISON_BIT);
  }

  template <typename T>
  PA_ALWAYS_INLINE static bool IsPtrOOB(T* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) ==
           OOB_POISON_BIT;
  }

  template <typename T>
  PA_ALWAYS_INLINE static T* PoisonOOBPtr(T* ptr) {
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
                                OOB_POISON_BIT);
  }
#else   // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
  template <typename T>
  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
    return ptr;
  }
#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
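
  // Round-trip sketch for the poison helpers above (illustrative only,
  // assumes BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) is enabled):
  //   T* end = PoisonOOBPtr(ptr + n);  // mark a one-past-the-end pointer
  //   IsPtrOOB(end);                   // true: the poison bit is set
  //   UnpoisonPtr(end) == ptr + n;     // true: unpoisoning restores the value
  // Because the poison bit sits above any address PartitionAlloc would hand
  // out, a poisoned pointer that is dereferenced points far outside the
  // allocation.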

 public:
  // Wraps a pointer.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return ptr;
    }
    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr));
    if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      PA_BASE_CHECK(ptr != nullptr);
#endif
      AcquireInternal(address);
    } else {
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_HAS_BUILTIN(__builtin_constant_p)
      // Similarly to `IsSupportedAndNotNull` above, elide the
      // `BanSuperPageFromBRPPool` call if the compiler can prove that `address`
      // is zero since PA won't be able to map anything at that address anyway.
      bool known_constant_zero =
          __builtin_constant_p(address == 0) && (address == 0);
#else   // PA_HAS_BUILTIN(__builtin_constant_p)
      bool known_constant_zero = false;
#endif  // PA_HAS_BUILTIN(__builtin_constant_p)

      if (!known_constant_zero) {
        partition_alloc::internal::AddressPoolManagerBitmap::
            BanSuperPageFromBRPPool(address);
      }
#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
    }

    return ptr;
  }

  // Notifies the allocator when a wrapped pointer is being removed or
  // replaced.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return;
    }
    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));
    if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      PA_BASE_CHECK(wrapped_ptr != nullptr);
#endif
      ReleaseInternal(address);
    }
    // We are unable to counteract BanSuperPageFromBRPPool(), called from
    // WrapRawPtr(). We only use one bit per super-page and thus can't tell if
    // there's more than one associated raw_ptr<T> at a given time. The risk of
    // exhausting the entire address space is minuscule; therefore, we opted
    // for the perf gain of a single relaxed store (in the above-mentioned
    // function) over the much more expensive pair of CAS operations we'd need
    // to un-ban a super-page.
  }

  // Unwraps the pointer, while asserting that memory hasn't been freed. The
  // function is allowed to crash on nullptr.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr));
#endif
    uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
    if (IsSupportedAndNotNull(address)) {
      PA_BASE_CHECK(wrapped_ptr != nullptr);
      PA_BASE_CHECK(IsPointeeAlive(address));
    }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    return wrapped_ptr;
  }

  // Unwraps the pointer, while asserting that memory hasn't been freed. The
  // function must handle nullptr gracefully.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    // Some code uses invalid pointer values as indicators, so those values
    // must be passed through unchanged during extraction. The following check
    // will pass invalid values through if those values do not fall within the
    // BRP pool after being unpoisoned.
    if (!IsSupportedAndNotNull(partition_alloc::UntagPtr(unpoisoned_ptr))) {
      return wrapped_ptr;
    }
    // Poison-based OOB checks do not extend to extracted pointers. The
    // alternative of retaining poison on extracted pointers could introduce
    // new OOB conditions, e.g., in code that extracts an end-of-allocation
    // pointer for use in a loop termination condition. The poison bit would
    // make that pointer appear to reference a very high address.
#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    return unpoisoned_ptr;
  }
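
  // Illustrative sketch (hypothetical code, not part of the original file):
  // extraction strips the poison bit, so an end-of-allocation pointer can be
  // used as a loop endpoint.
  //   raw_ptr<T> base = ...;      // points into a BRP-managed slot of n
  //   T* end = (base + n).get();  // .get() extracts; poison, if any, is
  //                               // removed, so `p != end` behaves sanely
  //   for (T* p = base.get(); p != end; ++p) { /* ... */ }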

  // Unwraps the pointer, without making an assertion on whether memory was
  // freed or not.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
    // This may be used for unwrapping an end-of-allocation pointer to be used
    // as an endpoint in an iterative algorithm, so this removes the OOB poison
    // bit.
    return UnpoisonPtr(wrapped_ptr);
  }

  // Upcasts the wrapped pointer.
  template <typename To, typename From>
  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
    static_assert(std::is_convertible_v<From*, To*>,
                  "From must be convertible to To.");
    // Note, this cast may change the address if upcasting to a base that lies
    // in the middle of the derived object.
    return wrapped_ptr;
  }

  // Verifies that the pointer stays within the same slot, and returns the
  // poisoned version of `new_ptr` if it's an end-of-allocation pointer and
  // OOB poisoning is enabled.
  template <typename T>
  PA_ALWAYS_INLINE static T* VerifyAndPoisonPointerAfterAdvanceOrRetreat(
      T* unpoisoned_ptr,
      T* new_ptr) {
    // In the "before allocation" mode, on 32-bit, we can run into a problem
    // that the end-of-allocation address could fall outside of
    // PartitionAlloc's pools, if this is the last slot of the super page,
    // thus pointing to the guard page. This means the ref-count won't be
    // decreased when the pointer is released (leak).
    //
    // We could possibly solve it in a few different ways:
    // - Add the trailing guard page to the pool, but we'd have to think very
    //   hard if this doesn't create another hole.
    // - Add an address adjustment to the "is in pool?" check, similar to the
    //   one in PartitionAllocGetSlotStartInBRPPool(), but that seems fragile,
    //   not to mention adding an extra instruction to an inlined hot path.
    // - Let the leak happen, since it should be a very rare condition.
    // - Go back to the previous solution of rewrapping the pointer, but that
    //   had an issue of losing BRP protection in case the pointer ever gets
    //   shifted back before the end of allocation.
    //
    // We decided to cross that bridge once we get there... if we ever get
    // there. Currently there are no plans to switch back to the "before
    // allocation" mode.
    //
    // This problem doesn't exist in the "previous slot" mode, or any mode that
    // involves putting extras after the allocation, because the
    // end-of-allocation address belongs to the same slot.
    static_assert(BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT));

    // First check if the new address didn't migrate in/out the BRP pool, and
    // that it lands within the same allocation. An end-of-allocation address
    // is ok, too, and that may lead to the pointer being poisoned if the
    // relevant feature is enabled. These checks add a non-trivial cost, but
    // they're cheaper and more secure than the previous implementation that
    // rewrapped the pointer (wrapped the new pointer and unwrapped the old
    // one).
    //
    // Note, the value of these checks goes beyond OOB protection. They're
    // important for the integrity of the BRP algorithm. Without them, an
    // attacker could make the pointer point to another allocation, and cause
    // its ref-count to go to 0 upon this pointer's destruction, even though
    // there may be another pointer still pointing to it, thus making it lose
    // the BRP protection prematurely.
    const uintptr_t before_addr = partition_alloc::UntagPtr(unpoisoned_ptr);
    const uintptr_t after_addr = partition_alloc::UntagPtr(new_ptr);
    // TODO(bartekn): Consider adding support for non-BRP pools too (without
    // removing the cross-pool migration check).
    if (IsSupportedAndNotNull(before_addr)) {
      constexpr size_t size = sizeof(T);
      [[maybe_unused]] const bool is_end =
          CheckPointerWithinSameAlloc(before_addr, after_addr, size);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
      if (is_end) {
        new_ptr = PoisonOOBPtr(new_ptr);
      }
#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    } else {
      // Check that the new address didn't migrate into the BRP pool, as it
      // would result in more pointers pointing to an allocation than its
      // ref-count reflects.
      PA_BASE_CHECK(!IsSupportedAndNotNull(after_addr));
    }
    return new_ptr;
  }

  // Advance the wrapped pointer by `delta_elems`.
  template <
      typename T,
      typename Z,
      typename =
          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr + delta_elems;
    }
    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
    return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
        unpoisoned_ptr, unpoisoned_ptr + delta_elems);
  }

  // Retreat the wrapped pointer by `delta_elems`.
  template <
      typename T,
      typename Z,
      typename =
          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr - delta_elems;
    }
    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
    return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
        unpoisoned_ptr, unpoisoned_ptr - delta_elems);
  }
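
  // Illustrative behavior of Advance/Retreat (hypothetical code, not part of
  // the original file), assuming `arr` points to a BRP-managed slot holding
  // 10 ints:
  //   raw_ptr<int> p = arr;
  //   p += 10;  // ok: one past the end; OOB-poisoned if the poison feature
  //             // is enabled
  //   p -= 3;   // ok: back inside the slot, poison removed
  //   p += 20;  // CHECK-fails in VerifyAndPoisonPointerAfterAdvanceOrRetreat:
  //             // lands outside the slot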

  template <typename T>
  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
                                                            T* wrapped_ptr2) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr1 - wrapped_ptr2;
    }

    T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
    T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return unpoisoned_ptr1 - unpoisoned_ptr2;
    }
    uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1);
    uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2);
    // Ensure that both pointers are within the same slot, and pool!
    // TODO(bartekn): Consider adding support for non-BRP pool too.
    if (IsSupportedAndNotNull(address1)) {
      PA_BASE_CHECK(IsSupportedAndNotNull(address2));
      PA_BASE_CHECK(partition_alloc::internal::IsPtrWithinSameAlloc(
                        address2, address1, sizeof(T)) !=
                    partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
    } else {
      PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
    }
#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
    return unpoisoned_ptr1 - unpoisoned_ptr2;
  }

  // Returns a copy of a wrapped pointer, without making an assertion on
  // whether memory was freed or not. This method increments the reference
  // count of the allocation slot.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
    return WrapRawPtr(wrapped_ptr);
  }

  // Report the current wrapped pointer if the pointee isn't alive anymore.
  template <typename T>
  PA_ALWAYS_INLINE static void ReportIfDangling(T* wrapped_ptr) {
    ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr));
  }

  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return ptr;
    } else {
      return WrapRawPtr(ptr);
    }
  }

  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    } else {
      return UnpoisonPtr(wrapped_ptr);
    }
  }

  // This is for accounting only, used by unit tests.
  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}

 private:
  // We've evaluated several strategies (inline nothing, various parts, or
  // everything in |Wrap()| and |Release()|) using the Speedometer2 benchmark
  // to measure performance. The best results were obtained when only the
  // lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined.
  // Therefore, we've extracted the rest into the functions below and marked
  // them as PA_NOINLINE to prevent unintended LTO effects.
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void AcquireInternal(
      uintptr_t address);
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReleaseInternal(
      uintptr_t address);
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) bool IsPointeeAlive(
      uintptr_t address);
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReportIfDanglingInternal(
      uintptr_t address);

  // CHECKs that `before_addr` and `after_addr` are in the same allocation,
  // for a given `type_size`.
  // If BACKUP_REF_PTR_POISON_OOB_PTR is enabled, returns whether the pointer
  // is at the end of the allocation.
  // If BACKUP_REF_PTR_POISON_OOB_PTR is disabled, returns false.
  PA_NOINLINE static PA_COMPONENT_EXPORT(
      RAW_PTR) bool CheckPointerWithinSameAlloc(uintptr_t before_addr,
                                                uintptr_t after_addr,
                                                size_t type_size);
};

}  // namespace base::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_