// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/address_pool_manager.h"

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <limits>

#include "base/allocator/partition_allocator/address_space_stats.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_PKEYS)
#include <sys/mman.h>
#endif

namespace partition_alloc::internal {

AddressPoolManager AddressPoolManager::singleton_;

// static
AddressPoolManager& AddressPoolManager::GetInstance() {
  return singleton_;
}

#if BUILDFLAG(HAS_64_BIT_POINTERS)

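// With 64-bit pointers, each pool is a single contiguous address-space
// reservation registered up front via Add(). Allocation state is tracked at
// super-page granularity by a bitset inside each Pool, so reserving and
// freeing chunks only flips bits; system calls are needed only to commit and
// decommit the underlying pages.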
namespace {

// This will crash if the range cannot be decommitted.
void DecommitPages(uintptr_t address, size_t size) {
  // Callers rely on the pages being zero-initialized when recommitting them.
  // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
  // particular on macOS, but |DecommitAndZeroSystemPages| does.
  DecommitAndZeroSystemPages(address, size);
}

}  // namespace

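// Registers the pre-reserved address region [ptr, ptr + length) with the pool
// identified by |handle|. Handles are 1-based; both region bounds must be
// super-page aligned, and a pool may only be initialized once.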
void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) {
  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
  PA_CHECK(handle > 0 && handle <= std::size(aligned_pools_.pools_));

  Pool* pool = GetPool(handle);
  PA_CHECK(!pool->IsInitialized());
  pool->Initialize(ptr, length);
}

void AddressPoolManager::GetPoolUsedSuperPages(
    pool_handle handle,
    std::bitset<kMaxSuperPagesInPool>& used) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return;
  }

  pool->GetUsedSuperPages(used);
}

uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return 0;
  }

  return pool->GetBaseAddress();
}

void AddressPoolManager::ResetForTesting() {
  for (size_t i = 0; i < std::size(aligned_pools_.pools_); ++i) {
    aligned_pools_.pools_[i].Reset();
  }
}

void AddressPoolManager::Remove(pool_handle handle) {
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  pool->Reset();
}

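// Reserves |length| bytes from the pool. If |requested_address| is non-zero
// and the chunk starting there is still free, that exact address is returned;
// otherwise the search falls back to a first-fit scan over the pool's bitmap.
// Returns 0 when the pool has no free chunk large enough.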
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  Pool* pool = GetPool(handle);
  if (!requested_address) {
    return pool->FindChunk(length);
  }
  const bool is_available = pool->TryReserveChunk(requested_address, length);
  if (is_available) {
    return requested_address;
  }
  return pool->FindChunk(length);
}

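// Returns a reserved region to the pool. The pages are decommitted first, so
// the physical memory is released to the OS, then the corresponding bits in
// the pool's bitmap are cleared.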
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  DecommitPages(address, length);
  pool->FreeChunk(address, length);
}

void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
  PA_CHECK(ptr != 0);
  PA_CHECK(!(ptr & kSuperPageOffsetMask));
  PA_CHECK(!(length & kSuperPageOffsetMask));
  address_begin_ = ptr;
#if BUILDFLAG(PA_DCHECK_IS_ON)
  address_end_ = ptr + length;
  PA_DCHECK(address_begin_ < address_end_);
#endif

  total_bits_ = length / kSuperPageSize;
  PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);

  ScopedGuard scoped_lock(lock_);
  alloc_bitset_.reset();
  bit_hint_ = 0;
}

bool AddressPoolManager::Pool::IsInitialized() {
  return address_begin_ != 0;
}

void AddressPoolManager::Pool::Reset() {
  address_begin_ = 0;
}

void AddressPoolManager::Pool::GetUsedSuperPages(
    std::bitset<kMaxSuperPagesInPool>& used) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(IsInitialized());
  used = alloc_bitset_;
}

uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
  PA_DCHECK(IsInitialized());
  return address_begin_;
}

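// First-fit search for |requested_size| bytes of contiguous free super pages.
// |bit_hint_| maintains the invariant that every bit below it is set, so the
// scan can safely start there. For illustration: with need_bits = 2 and a
// bitset whose low bits are 1,1,0,1,0,0,..., the candidate region [2,4)
// fails on bit 3, |beg_bit| advances to 4, and the region [4,6) succeeds,
// returning address_begin_ + 4 * kSuperPageSize.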
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t need_bits = requested_size >> kSuperPageShift;

  // Use first-fit policy to find an available chunk from free chunks. Start
  // from |bit_hint_|, because we know there are no free chunks before.
  size_t beg_bit = bit_hint_;
  size_t curr_bit = bit_hint_;
  while (true) {
    // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
    // |total_bits_|, return 0 to signal no free chunk was found.
    size_t end_bit = beg_bit + need_bits;
    if (end_bit > total_bits_) {
      return 0;
    }

    bool found = true;
    for (; curr_bit < end_bit; ++curr_bit) {
      if (alloc_bitset_.test(curr_bit)) {
        // The bit was set, so this chunk isn't entirely free. Set |found=false|
        // to ensure the outer loop continues. However, continue the inner loop
        // to set |beg_bit| just past the last set bit in the investigated
        // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
        // next outer loop pass from checking the same bits.
        beg_bit = curr_bit + 1;
        found = false;
        if (bit_hint_ == curr_bit) {
          ++bit_hint_;
        }
      }
    }

    // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
    // mark as allocated) and return the allocated address.
    if (found) {
      for (size_t i = beg_bit; i < end_bit; ++i) {
        PA_DCHECK(!alloc_bitset_.test(i));
        alloc_bitset_.set(i);
      }
      if (bit_hint_ == beg_bit) {
        bit_hint_ = end_bit;
      }
      uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if BUILDFLAG(PA_DCHECK_IS_ON)
      PA_DCHECK(address + requested_size <= address_end_);
#endif
      return address;
    }
  }

  PA_NOTREACHED();
  return 0;
}

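// Attempts to reserve the chunk starting at exactly |address|. Returns false
// if the requested region extends past the end of the pool or if any super
// page in it is already allocated.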
bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
                                               size_t requested_size) {
  ScopedGuard scoped_lock(lock_);
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
  const size_t need_bits = requested_size / kSuperPageSize;
  const size_t end_bit = begin_bit + need_bits;
  // Check that requested address is not too high.
  if (end_bit > total_bits_) {
    return false;
  }
  // Check if any bit of the requested region is set already.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    if (alloc_bitset_.test(i)) {
      return false;
    }
  }
  // Otherwise, set the bits.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    alloc_bitset_.set(i);
  }
  return true;
}

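// Marks a chunk as free again. Lowering |bit_hint_| to the first freed bit
// preserves the invariant that no free super pages exist below |bit_hint_|,
// so the next FindChunk() starts its scan early enough to see this chunk.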
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(free_size & kSuperPageOffsetMask));

  PA_DCHECK(address_begin_ <= address);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(address + free_size <= address_end_);
#endif

  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
  for (size_t i = beg_bit; i < end_bit; ++i) {
    PA_DCHECK(alloc_bitset_.test(i));
    alloc_bitset_.reset(i);
  }
  bit_hint_ = std::min(bit_hint_, beg_bit);
}

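// Snapshots the bitmap under the lock, then computes usage (the number of set
// bits) and the largest run of consecutive free super pages. Scanning from
// |bit_hint_| is sufficient because all bits below it are known to be set.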
void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
  std::bitset<kMaxSuperPagesInPool> pages;
  size_t i;
  {
    ScopedGuard scoped_lock(lock_);
    pages = alloc_bitset_;
    i = bit_hint_;
  }

  stats->usage = pages.count();

  size_t largest_run = 0;
  size_t current_run = 0;
  for (; i < total_bits_; ++i) {
    if (!pages[i]) {
      current_run += 1;
      continue;
    } else if (current_run > largest_run) {
      largest_run = current_run;
    }
    current_run = 0;
  }

  // Fell out of the loop with the last bit being zero. Check once more.
  if (current_run > largest_run) {
    largest_run = current_run;
  }
  stats->largest_available_reservation = largest_run;
}

void AddressPoolManager::GetPoolStats(const pool_handle handle,
                                      PoolStats* stats) {
  Pool* pool = GetPool(handle);
  if (!pool->IsInitialized()) {
    return;
  }
  pool->GetStats(stats);
}

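// Aggregates per-pool statistics for the 64-bit configuration. Pools that are
// compiled out or not yet initialized leave their entries untouched (the
// caller zero-initializes the stats struct).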
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  // Get 64-bit pool stats.
  GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (IsConfigurablePoolAvailable()) {
    GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
  }
#if BUILDFLAG(ENABLE_PKEYS)
  GetPoolStats(kPkeyPoolHandle, &stats->pkey_pool_stats);
#endif
  return true;
}

#else  // BUILDFLAG(HAS_64_BIT_POINTERS)

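// With 32-bit pointers there is no contiguous reservation per pool. Instead,
// regions are allocated anywhere in the address space, and global bitmaps
// (AddressPoolManagerBitmap) record which addresses belong to which pool.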
static_assert(
    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
        0,
    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
    "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
              "kGuardOffsetOfBRPPoolBitmap.");

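// Helpers that flip a contiguous bit range in one of the global bitmaps. The
// DCHECKs verify that every bit actually changes state, catching double
// marking or double unmarking of the same region.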
template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
               size_t start_bit,
               size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(!bitmap.test(i));
    bitmap.set(i);
  }
}

template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
                 size_t start_bit,
                 size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(bitmap.test(i));
    bitmap.reset(i);
  }
}

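// In the 32-bit configuration, Reserve() simply asks the OS for an
// inaccessible, super-page-aligned mapping; the pool |handle| does not select
// an address range here. The caller later marks the region as used via
// MarkUsed().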
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  uintptr_t address =
      AllocPages(requested_address, length, kSuperPageSize,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc);
  return address;
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  FreePages(address, length);
}

void AddressPoolManager::MarkUsed(pool_handle handle,
                                  uintptr_t address,
                                  size_t length) {
  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByBRPPool() return false when an address inside the
    // first or the last PartitionPageSize()-bytes block is given:
    //
    //          ------+---+---------------+---+----
    // memory   ..... | B | managed by PA | B | ...
    // regions  ------+---+---------------+---+----
    //
    // B: PartitionPageSize()-bytes block. This is used internally by the
    // allocator and is not available for callers.
    //
    // This is required to avoid a crash caused by the following code:
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i++) { ptr++; }
    //     // |ptr| may point to an address inside 'B'.
    //   }
    //
    // Suppose that |ptr| points to an address inside B after the loop. If
    // IsManagedByBRPPool(ptr) were to return true, ~raw_ptr<T>() would
    // crash, since the memory is not allocated by PartitionAlloc.
    SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
              (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
                  AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
              address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
              length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}

void AddressPoolManager::MarkUnused(pool_handle handle,
                                    uintptr_t address,
                                    size_t length) {
  // Address regions allocated for normal buckets are never released, so this
  // function can only be called for direct map. However, do not DCHECK on
  // IsManagedByDirectMap(address), because many tests exercise this function
  // using small allocations.

  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByBRPPool() return false when an address inside the
    // first or the last PartitionPageSize()-bytes block is given.
    // (See the MarkUsed comment.)
    ResetBitmap(
        AddressPoolManagerBitmap::brp_pool_bits_,
        (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    ResetBitmap(
        AddressPoolManagerBitmap::regular_pool_bits_,
        address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
        length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}

void AddressPoolManager::ResetForTesting() {
  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::regular_pool_bits_.reset();
  AddressPoolManagerBitmap::brp_pool_bits_.reset();
}

namespace {

// Counts super pages in use represented by `bitmap`.
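// A super page counts as used if any of its |bits_per_super_page| bits is
// set. For example, with bits_per_super_page = 4, the bit pattern 0010'0000
// (lowest bit first) yields a count of 1: the first super page has one set
// bit, the second has none.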
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
                           const size_t bits_per_super_page) {
  size_t count = 0;
  size_t bit_index = 0;

  // Stride over super pages.
  for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
    // Stride over the bits comprising the super page.
    for (bit_index = super_page_index * bits_per_super_page;
         bit_index < (super_page_index + 1) * bits_per_super_page &&
         bit_index < bitsize;
         ++bit_index) {
      if (bitmap[bit_index]) {
        count += 1;
        // Move on to the next super page.
        break;
      }
    }
  }
  return count;
}

}  // namespace

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
  {
    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
  }  // scoped_lock

  // Pool usage is read out from the address pool bitmaps.
  // The output stats are sized in super pages, so we interpret
  // the bitmaps into super page usage.
  static_assert(
      kSuperPageSize %
              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kRegularPoolBitsPerSuperPage =
      kSuperPageSize /
      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;

  // Get 32-bit pool usage.
  stats->regular_pool_stats.usage =
      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  static_assert(
      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kBRPPoolBitsPerSuperPage =
      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
  stats->brp_pool_stats.usage =
      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);

  // Get blocklist size.
  for (const auto& blocked :
       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
    if (blocked.load(std::memory_order_relaxed)) {
      stats->blocklist_size += 1;
    }
  }

  // Count failures in finding non-blocklisted addresses.
  stats->blocklist_hit_count =
      AddressPoolManagerBitmap::blocklist_hit_count_.load(
          std::memory_order_relaxed);
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  return true;
}

#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

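// Dumps a snapshot of the address-space statistics. GetStats() is defined in
// both the 64-bit and 32-bit branches above, so this works in either
// configuration.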
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
  AddressSpaceStats stats{};
  if (GetStats(&stats)) {
    dumper->DumpStats(&stats);
  }
}

}  // namespace partition_alloc::internal