// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_REF_COUNT_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>

#include "build/build_config.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/tagging.h"

#if BUILDFLAG(IS_MAC)
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/mac/mac_util.h"
#endif  // BUILDFLAG(IS_MAC)

namespace partition_alloc::internal {

// Aligns `ref_count_size` up to an 8B boundary, if needed, and returns it.
// *  Known to be needed on macOS 13: https://crbug.com/1378822.
// *  Thought to be needed on macOS 14: https://crbug.com/1457756.
// *  No-op everywhere else.
//
// Placed outside `BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)`
// intentionally to accommodate usage in contexts also outside
// this gating.
PA_ALWAYS_INLINE size_t AlignUpRefCountSizeForMac(size_t ref_count_size) {
#if BUILDFLAG(IS_MAC)
  if (internal::base::mac::MacOSMajorVersion() == 13 ||
      internal::base::mac::MacOSMajorVersion() == 14) {
    return internal::base::bits::AlignUp<size_t>(ref_count_size, 8);
  }
#endif  // BUILDFLAG(IS_MAC)
  return ref_count_size;
}

#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

// Special-purpose atomic reference count class used by RawPtrBackupRefImpl.
// The least significant bit of the count is reserved for tracking the liveness
// state of an allocation: it's set when the allocation is created and cleared
// on free(). So the count can be:
//
// 1 for an allocation that is just returned from Alloc()
// 2 * k + 1 for a "live" allocation with k references
// 2 * k for an allocation with k dangling references after Free()
//
// This protects against double-frees: |ReleaseFromAllocator()| checks whether
// the reference count is odd, and if it is not, a double-free has occurred.
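//
// For illustration, the transitions above (a sketch assuming the build without
// dangling-raw_ptr checks, where CountType is uint32_t and the raw count maps
// directly onto this scheme):
//
//   PartitionRefCount ref_count(/*needs_mac11_malloc_size_hack=*/false);
//   // count == 1: freshly allocated, live, no references.
//   ref_count.Acquire();               // count == 3: live, k == 1.
//   ref_count.Acquire();               // count == 5: live, k == 2.
//   ref_count.ReleaseFromAllocator();  // count == 4: freed, 2 dangling refs.
//   ref_count.Release();               // count == 2: freed, 1 dangling ref.
//   ref_count.Release();               // count == 0: returns true, so the
//                                      // memory can now be reclaimed.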
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
 public:
  // This class holds an atomic bit field: `count_`. It holds up to 5 values:
  //
  // bits   name                   description
  // -----  ---------------------  ----------------------------------------
  // 0      is_allocated           Whether or not the memory is held by the
  //                               allocator.
  //                               - 1 at construction time.
  //                               - Decreased in ReleaseFromAllocator().
  //
  // 1-31   ptr_count              Number of raw_ptr<T>.
  //                               - Increased in Acquire().
  //                               - Decreased in Release().
  //
  // 32     dangling_detected      A dangling raw_ptr<> has been detected.
  // 33     needs_mac11_malloc_    Whether malloc_size() return value needs to
  //          size_hack            be adjusted for this allocation.
  //
  // 34-63  unprotected_ptr_count  Number of
  //                               raw_ptr<T, DisableDanglingPtrDetection>
  //                               - Increased in AcquireFromUnprotectedPtr().
  //                               - Decreased in ReleaseFromUnprotectedPtr().
  //
  // The allocation is reclaimed if all of:
  // - |is_allocated|
  // - |ptr_count|
  // - |unprotected_ptr_count|
  // are zero.
  //
  // During ReleaseFromAllocator(), if |ptr_count| is not zero,
  // |dangling_detected| is set and the error is reported via
  // DanglingRawPtrDetected(id). The matching DanglingRawPtrReleased(id) will be
  // called when the last raw_ptr<> is released.
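  //
  // As an illustrative timeline of that reporting (a sketch, with
  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) enabled):
  //
  //   ref_count.Acquire();               // A raw_ptr<T> starts pointing at
  //                                      // the allocation.
  //   ref_count.ReleaseFromAllocator();  // Free() with |ptr_count| != 0:
  //                                      // |dangling_detected| is set and
  //                                      // DanglingRawPtrDetected(id) fires.
  //   ref_count.Release();               // The last raw_ptr<T> goes away:
  //                                      // DanglingRawPtrReleased(id) fires,
  //                                      // and the memory is reclaimed.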
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  using CountType = uint64_t;
  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0000'0000'0001;
  static constexpr CountType kPtrCountMask = 0x0000'0000'FFFF'FFFE;
  static constexpr CountType kUnprotectedPtrCountMask = 0xFFFF'FFFC'0000'0000;
  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0001'0000'0000;
  static constexpr CountType kNeedsMac11MallocSizeHackBit =
      0x0000'0002'0000'0000;

  static constexpr CountType kPtrInc = 0x0000'0000'0000'0002;
  static constexpr CountType kUnprotectedPtrInc = 0x0000'0004'0000'0000;
#else
  using CountType = uint32_t;
  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0001;

  static constexpr CountType kPtrCountMask = 0x7FFF'FFFE;
  static constexpr CountType kUnprotectedPtrCountMask = 0x0000'0000;
  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0000;
  static constexpr CountType kNeedsMac11MallocSizeHackBit = 0x8000'0000;

  static constexpr CountType kPtrInc = 0x0000'0002;
#endif

  PA_ALWAYS_INLINE explicit PartitionRefCount(
      bool needs_mac11_malloc_size_hack);

  // Incrementing the counter doesn't imply any visibility about modified
  // memory, hence relaxed atomics. For decrement, visibility is required before
  // the memory gets freed, necessitating an acquire/release barrier before
  // freeing the memory.
  //
  // For details, see base::AtomicRefCount, which has the same constraints and
  // characteristics.
  //
  // FYI: This document records the assembly produced by the compiler on every
  // platform, in particular the uint64_t fetch_add on 32-bit CPUs:
  // https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
  PA_ALWAYS_INLINE void Acquire() {
    CheckCookieIfSupported();

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
    constexpr CountType kInc = kUnprotectedPtrInc;
    constexpr CountType kMask = kUnprotectedPtrCountMask;
#else
    constexpr CountType kInc = kPtrInc;
    constexpr CountType kMask = kPtrCountMask;
#endif
    CountType old_count = count_.fetch_add(kInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kMask) != kMask);
  }

  // Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE void AcquireFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();
    CountType old_count =
        count_.fetch_add(kUnprotectedPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kUnprotectedPtrCountMask) !=
             kUnprotectedPtrCountMask);
#else
    Acquire();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  PA_ALWAYS_INLINE bool Release() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
    constexpr CountType kInc = kUnprotectedPtrInc;
    constexpr CountType kMask = kUnprotectedPtrCountMask;
#else
    constexpr CountType kInc = kPtrInc;
    constexpr CountType kMask = kPtrCountMask;
#endif
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_sub(kInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kMask);

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // If a dangling raw_ptr<> was detected, report it.
    if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
                    kDanglingRawPtrDetectedBit)) {
      partition_alloc::internal::DanglingRawPtrReleased(
          reinterpret_cast<uintptr_t>(this));
    }
#endif

    return ReleaseCommon(old_count - kInc);
  }

  // Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE bool ReleaseFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();

    CountType old_count =
        count_.fetch_sub(kUnprotectedPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kUnprotectedPtrCountMask);

    return ReleaseCommon(old_count - kUnprotectedPtrInc);
#else
    return Release();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  // This function should be called by the allocator during Free().
  PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
    CheckCookieIfSupported();

    // TODO(bartekn): Make the double-free check more effective. Once freed, the
    // ref-count is overwritten by an encoded freelist-next pointer.
    CountType old_count =
        count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);

    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
      DoubleFreeOrCorruptionDetected(old_count);
    }

    // Release memory when no raw_ptr<> exists anymore:
    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
    if (PA_LIKELY((old_count & mask) == 0)) {
      std::atomic_thread_fence(std::memory_order_acquire);
      // The allocation is about to get freed, so clear the cookie.
      ClearCookieIfSupported();
      return true;
    }

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // Some raw_ptr<> still point at this allocation. Set the error flag if any
    // of them have not opted out of dangling-pointer detection:
    if (PA_UNLIKELY(old_count & kPtrCountMask)) {
      count_.fetch_or(kDanglingRawPtrDetectedBit, std::memory_order_relaxed);
      partition_alloc::internal::DanglingRawPtrDetected(
          reinterpret_cast<uintptr_t>(this));
    }
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    return false;
  }

  // "IsAlive" means is allocated and not freed. "KnownRefs" refers to
  // raw_ptr<T> references. There may be other references from raw pointers or
  // unique_ptr, but we have no way of tracking them, so we hope for the best.
  // To summarize, the function returns whether we believe the allocation can be
  // safely freed.
  PA_ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
    CheckCookieIfSupported();
    static constexpr CountType mask =
        kMemoryHeldByAllocatorBit | kPtrCountMask | kUnprotectedPtrCountMask;
    return (count_.load(std::memory_order_acquire) & mask) ==
           kMemoryHeldByAllocatorBit;
  }

  PA_ALWAYS_INLINE bool IsAlive() {
    bool alive =
        count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
    if (alive) {
      CheckCookieIfSupported();
    }
    return alive;
  }

  // Called when a raw_ptr is not banning dangling ptrs, but the user still
  // wants to ensure the pointer is not currently dangling. This is currently
  // used in UnretainedWrapper to make sure callbacks are not invoked with
  // dangling pointers. If such a raw_ptr exists but the allocation is no longer
  // alive, then we have a dangling pointer to a dead object.
  PA_ALWAYS_INLINE void ReportIfDangling() {
    if (!IsAlive()) {
      partition_alloc::internal::UnretainedDanglingRawPtrDetected(
          reinterpret_cast<uintptr_t>(this));
    }
  }

  // GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
  // make sure the `raw_ptr<T>` release operation will never attempt to call the
  // PA `free` on such a slot. GWP-ASan takes the extra reference into account
  // when determining whether the slot can be reused.
  PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
    brp_cookie_ = CalculateCookie();
#endif
    count_.store(kPtrInc | kMemoryHeldByAllocatorBit,
                 std::memory_order_release);
  }

  PA_ALWAYS_INLINE bool CanBeReusedByGwpAsan() {
    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
    return (count_.load(std::memory_order_acquire) & mask) == kPtrInc;
  }
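
  // To illustrate the extra reference (a sketch; the GWP-ASan wiring itself
  // lives outside this file):
  //
  //   ref_count.InitalizeForGwpAsan();   // count_ == kPtrInc |
  //                                      //           kMemoryHeldByAllocatorBit
  //   ref_count.Acquire();               // A raw_ptr<T> is created...
  //   ref_count.Release();               // ...and destroyed: the extra kPtrInc
  //                                      // keeps |ptr_count| non-zero, so this
  //                                      // never triggers the PA free path.
  //   ref_count.CanBeReusedByGwpAsan();  // true: only the extra reference
  //                                      // remains.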

  bool NeedsMac11MallocSizeHack() {
    return count_.load(std::memory_order_relaxed) &
           kNeedsMac11MallocSizeHackBit;
  }

#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
  PA_ALWAYS_INLINE void SetRequestedSize(size_t size) {
    requested_size_ = static_cast<uint32_t>(size);
  }
  PA_ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
#endif  // PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)

 private:
  // The common part shared by Release() and ReleaseFromUnprotectedPtr().
  // Called after updating the ref counts; |count| is the new value of |count_|
  // after the decrement. Returns true if memory can be reclaimed.
  PA_ALWAYS_INLINE bool ReleaseCommon(CountType count) {
    // Do not release memory, if it is still held by any of:
    // - The allocator
    // - A raw_ptr<T>
    // - A raw_ptr<T, DisableDanglingPtrDetection>
    //
    // Assuming this raw_ptr is not dangling, the memory must still be held at
    // least by the allocator, so this is PA_LIKELY true.
    if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
                            kUnprotectedPtrCountMask)))) {
      return false;  // Do not release the memory.
    }

    // In most thread-safe reference count implementations, an acquire
    // barrier is required so that all changes made to an object from other
    // threads are visible to its destructor. In our case, the destructor
    // finishes before the final `Release` call, so it shouldn't be a problem.
    // However, we will keep it as a precautionary measure.
    std::atomic_thread_fence(std::memory_order_acquire);

    // The allocation is about to get freed, so clear the cookie.
    ClearCookieIfSupported();
    return true;
  }

  // The cookie helps us ensure that:
  // 1) The reference count pointer calculation is correct.
  // 2) The returned allocation slot is not freed.
  PA_ALWAYS_INLINE void CheckCookieIfSupported() {
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
    PA_CHECK(brp_cookie_ == CalculateCookie());
#endif
  }

  PA_ALWAYS_INLINE void ClearCookieIfSupported() {
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
    brp_cookie_ = 0;
#endif
  }

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
  PA_ALWAYS_INLINE uint32_t CalculateCookie() {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
           kCookieSalt;
  }
#endif  // PA_CONFIG(REF_COUNT_CHECK_COOKIE)

  [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void
  DoubleFreeOrCorruptionDetected(CountType count) {
    PA_DEBUG_DATA_ON_STACK("refcount", count);
    PA_NO_CODE_FOLDING();
    PA_IMMEDIATE_CRASH();
  }

  // Note that in free slots, this is overwritten by encoded freelist
  // pointer(s). The way the pointers are encoded on 64-bit little-endian
  // architectures, count_ happens to stay even, which works well with the
  // double-free detection in ReleaseFromAllocator(). Don't change the layout of
  // this class, to preserve this functionality.
  std::atomic<CountType> count_;

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
  volatile uint32_t brp_cookie_;
#endif

#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
  uint32_t requested_size_;
#endif
};

PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(
    bool needs_mac11_malloc_size_hack)
    : count_(kMemoryHeldByAllocatorBit |
             (needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
      ,
      brp_cookie_(CalculateCookie())
#endif
{
}

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

static_assert(kAlignment % alignof(PartitionRefCount) == 0,
              "kAlignment must be a multiple of alignof(PartitionRefCount).");

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
constexpr size_t kPartitionPastAllocationAdjustment = 0;

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#else
static constexpr size_t kPartitionRefCountSizeShift = 3;
#endif

#else  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#elif PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 3;
#else
static constexpr size_t kPartitionRefCountSizeShift = 2;
#endif

#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));

// We need one PartitionRefCount for each system page in a super page. They take
// `x = sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize())` space.
// They need to fit into a system page of metadata as sparsely as possible to
// minimize cache line sharing, hence we calculate a multiplier as
// `SystemPageSize() / x`.
//
// The multiplier is expressed as a bitshift to optimize the code generation.
// SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit().
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
GetPartitionRefCountIndexMultiplierShift() {
  return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
}
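
// A worked example of the shift above (a sketch assuming common values:
// SystemPageSize() == 4096 so SystemPageShift() == 12, kSuperPageShift == 21
// i.e. 2MiB super pages, and kPartitionRefCountSizeShift == 3 i.e. an 8-byte
// PartitionRefCount):
//
//   x          = 8 * (2MiB / 4KiB) = 8 * 512 = 4096 bytes
//   multiplier = SystemPageSize() / x = 4096 / 4096 = 1
//   shift      = 12 * 2 - 21 - 3 = 0, and indeed 1 << 0 == 1.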

PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
    uintptr_t slot_start) {
  if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
    uintptr_t refcount_address = slot_start - sizeof(PartitionRefCount);
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
#endif
    // No need to tag because the ref count is not protected by MTE.
    return reinterpret_cast<PartitionRefCount*>(refcount_address);
  } else {
    // No need to tag, as the metadata region isn't protected by MTE.
    PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
        (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
                   << GetPartitionRefCountIndexMultiplierShift();
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
#endif
    return bitmap_base + index;
  }
}

#else  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = kAlignment;
constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;

// This is for adjusting pointers right past the allocation, which may point
// to the next slot. First subtract 1 to bring them into the intended slot;
// only then can the ref-count in that slot be found.
constexpr size_t kPartitionPastAllocationAdjustment = 1;
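
// For illustration (a sketch; `GetSlotStart()` is a hypothetical helper that
// maps an in-slot address back to its slot start):
//
//   uintptr_t address = ...;  // May point one past the end of an allocation,
//                             // i.e. at the very start of the next slot.
//   address -= kPartitionPastAllocationAdjustment;  // Back inside the
//                                                   // intended slot.
//   PartitionRefCount* rc = PartitionRefCountPointer(GetSlotStart(address));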

PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
    uintptr_t slot_start) {
  // Have to MTE-tag, because the address is untagged, but lies within a slot
  // area, which is protected by MTE.
  return static_cast<PartitionRefCount*>(TagAddr(slot_start));
}

#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
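
// Illustrative use from the allocator side (a sketch, not an actual call
// site):
//
//   uintptr_t slot_start = ...;  // Slot start, as known to Free().
//   PartitionRefCount* ref_count = PartitionRefCountPointer(slot_start);
//   if (ref_count->ReleaseFromAllocator()) {
//     // No raw_ptr<T> is left pointing at the slot; it can be reclaimed
//     // immediately.
//   }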

static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
              "PartitionRefCount should fit into the in-slot buffer.");

#else  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

static constexpr size_t kInSlotRefCountBufferSize = 0;
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;

#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_REF_COUNT_H_