1 // Copyright 2020 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/allocator/partition_allocator/thread_cache.h"
6 
7 #include <algorithm>
8 #include <atomic>
9 #include <vector>
10 
11 #include "base/allocator/partition_allocator/extended_api.h"
12 #include "base/allocator/partition_allocator/partition_address_space.h"
13 #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
14 #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
15 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
16 #include "base/allocator/partition_allocator/partition_alloc_config.h"
17 #include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
18 #include "base/allocator/partition_allocator/partition_lock.h"
19 #include "base/allocator/partition_allocator/tagging.h"
20 #include "build/build_config.h"
21 #include "testing/gtest/include/gtest/gtest.h"
22 
23 // With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
24 // cannot test the thread cache.
25 //
26 // Also, the thread cache is not supported on all platforms.
27 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
28     PA_CONFIG(THREAD_CACHE_SUPPORTED)
29 
30 namespace partition_alloc {
31 
32 using BucketDistribution = ThreadSafePartitionRoot::BucketDistribution;
33 namespace {
34 
35 constexpr size_t kSmallSize = 12;
36 constexpr size_t kDefaultCountForSmallBucket =
37     ThreadCache::kSmallBucketBaseCount * ThreadCache::kDefaultMultiplier;
38 constexpr size_t kFillCountForSmallBucket =
39     kDefaultCountForSmallBucket / ThreadCache::kBatchFillRatio;
40 
41 constexpr size_t kMediumSize = 200;
42 constexpr size_t kDefaultCountForMediumBucket = kDefaultCountForSmallBucket / 2;
43 constexpr size_t kFillCountForMediumBucket =
44     kDefaultCountForMediumBucket / ThreadCache::kBatchFillRatio;
45 
46 static_assert(kMediumSize <= ThreadCache::kDefaultSizeThreshold, "");
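// Note that the counts above are derived rather than hard-coded: a small
// bucket's default capacity is kSmallBucketBaseCount scaled by the cache
// multiplier, the medium bucket gets half of that, and a batch fill only
// replenishes 1/kBatchFillRatio of a bucket's capacity at a time. Keeping the
// tests in terms of these derived constants means they track changes to the
// ThreadCache tuning constants automatically.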
47 
48 class DeltaCounter {
49  public:
50   explicit DeltaCounter(uint64_t& value)
51       : current_value_(value), initial_value_(value) {}
52   void Reset() { initial_value_ = current_value_; }
53   uint64_t Delta() const { return current_value_ - initial_value_; }
54 
55  private:
56   uint64_t& current_value_;
57   uint64_t initial_value_;
58 };
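// Usage sketch, mirroring the tests below: snapshot a statistic, run the
// operation under test, and assert on the delta, so that allocations made
// during fixture setup do not leak into the expectation. For example:
//
//   DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
//   void* ptr = root()->Alloc(kSmallSize, "");  // First miss triggers a fill.
//   EXPECT_EQ(1u, batch_fill_counter.Delta());
//   root()->Free(ptr);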
59 
60 // Forbid extras, since they make finding out which bucket is used harder.
61 std::unique_ptr<PartitionAllocatorForTesting> CreateAllocator() {
62   std::unique_ptr<PartitionAllocatorForTesting> allocator =
63       std::make_unique<PartitionAllocatorForTesting>();
64   allocator->init({
65     PartitionOptions::AlignedAlloc::kAllowed,
66 #if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
67         PartitionOptions::ThreadCache::kEnabled,
68 #else
69         PartitionOptions::ThreadCache::kDisabled,
70 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
71         PartitionOptions::Quarantine::kAllowed,
72         PartitionOptions::Cookie::kDisallowed,
73         PartitionOptions::BackupRefPtr::kDisabled,
74         PartitionOptions::BackupRefPtrZapping::kDisabled,
75         PartitionOptions::UseConfigurablePool::kNo,
76   });
77   allocator->root()->UncapEmptySlotSpanMemoryForTesting();
78 
79   return allocator;
80 }
81 
82 }  // namespace
83 
84 class PartitionAllocThreadCacheTest
85     : public ::testing::TestWithParam<
86           PartitionRoot<internal::ThreadSafe>::BucketDistribution> {
87  public:
88   PartitionAllocThreadCacheTest()
89       : allocator_(CreateAllocator()), scope_(allocator_->root()) {}
90 
91   ~PartitionAllocThreadCacheTest() override {
92     ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
93 
94     // Clean up global state so the next test can recreate the ThreadCache.
95     if (ThreadCache::IsTombstone(ThreadCache::Get())) {
96       ThreadCache::RemoveTombstoneForTesting();
97     }
98   }
99 
100  protected:
101   void SetUp() override {
102     ThreadSafePartitionRoot* root = allocator_->root();
103     switch (GetParam()) {
104       case BucketDistribution::kDefault:
105         root->ResetBucketDistributionForTesting();
106         break;
107       case BucketDistribution::kDenser:
108         root->SwitchToDenserBucketDistribution();
109         break;
110     }
111 
112     ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
113         ThreadCache::kDefaultMultiplier);
114     ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
115 
116     // Make sure that enough slot spans have been touched, otherwise cache
117     // fill becomes unpredictable (the fill never takes slow paths in the
118     // allocator), which is an issue for tests.
119     FillThreadCacheAndReturnIndex(kSmallSize, 1000);
120     FillThreadCacheAndReturnIndex(kMediumSize, 1000);
121 
122     // Since allocations have happened, a thread cache has been created.
123     auto* tcache = root->thread_cache_for_testing();
124     ASSERT_TRUE(tcache);
125 
126     ThreadCacheRegistry::Instance().ResetForTesting();
127     tcache->ResetForTesting();
128   }
129 
130   void TearDown() override {
131     auto* tcache = root()->thread_cache_for_testing();
132     ASSERT_TRUE(tcache);
133     tcache->Purge();
134 
135     ASSERT_EQ(root()->get_total_size_of_allocated_bytes(),
136               GetBucketSizeForThreadCache());
137   }
138 
139   ThreadSafePartitionRoot* root() { return allocator_->root(); }
140 
141   // Returns the size of the smallest bucket fitting an allocation of
142   // |sizeof(ThreadCache)| bytes.
143   size_t GetBucketSizeForThreadCache() {
144     size_t tc_bucket_index = root()->SizeToBucketIndex(
145         sizeof(ThreadCache),
146         ThreadSafePartitionRoot::BucketDistribution::kDefault);
147     auto* tc_bucket = &root()->buckets[tc_bucket_index];
148     return tc_bucket->slot_size;
149   }
150 
151   static size_t SizeToIndex(size_t size) {
152     return PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(size,
153                                                                   GetParam());
154   }
155 
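  // Allocates |count| objects of |size| bytes and then frees them all, which
  // leaves the freed entries in this thread's cache. Returns the index of the
  // bucket the size maps to, so tests can inspect that bucket directly.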
156   size_t FillThreadCacheAndReturnIndex(size_t size, size_t count = 1) {
157     uint16_t bucket_index = SizeToIndex(size);
158     std::vector<void*> allocated_data;
159 
160     for (size_t i = 0; i < count; ++i) {
161       allocated_data.push_back(root()->Alloc(size, ""));
162     }
163     for (void* ptr : allocated_data) {
164       root()->Free(ptr);
165     }
166 
167     return bucket_index;
168   }
169 
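  // Grows this thread's cached memory by allocating and freeing a range of
  // sizes with increasing batch counts until the cache holds at least
  // |target_cached_memory| bytes, asserting if that target cannot be reached.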
170   void FillThreadCacheWithMemory(size_t target_cached_memory) {
171     for (int batch : {1, 2, 4, 8, 16}) {
172       for (size_t allocation_size = 1;
173            allocation_size <= ThreadCache::kLargeSizeThreshold;
174            allocation_size++) {
175         FillThreadCacheAndReturnIndex(allocation_size, batch);
176 
177         if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
178           return;
179         }
180       }
181     }
182 
183     ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
184   }
185 
186   std::unique_ptr<PartitionAllocatorForTesting> allocator_;
187   internal::ThreadCacheProcessScopeForTesting scope_;
188 };
189 
190 INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
191                          PartitionAllocThreadCacheTest,
192                          ::testing::Values(BucketDistribution::kDefault,
193                                            BucketDistribution::kDenser));
194 
195 TEST_P(PartitionAllocThreadCacheTest, Simple) {
196   // There is a cache.
197   auto* tcache = root()->thread_cache_for_testing();
198   EXPECT_TRUE(tcache);
199   DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
200 
201   void* ptr = root()->Alloc(kSmallSize, "");
202   ASSERT_TRUE(ptr);
203 
204   uint16_t index = SizeToIndex(kSmallSize);
205   EXPECT_EQ(kFillCountForSmallBucket - 1,
206             tcache->bucket_count_for_testing(index));
207 
208   root()->Free(ptr);
209   // Freeing fills the thread cache.
210   EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
211 
212   void* ptr2 = root()->Alloc(kSmallSize, "");
213   // MTE-untag, because Free() changes tag.
214   EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
215   // Allocated from the thread cache.
216   EXPECT_EQ(kFillCountForSmallBucket - 1,
217             tcache->bucket_count_for_testing(index));
218 
219   EXPECT_EQ(1u, batch_fill_counter.Delta());
220 
221   root()->Free(ptr2);
222 }
223 
224 TEST_P(PartitionAllocThreadCacheTest, InexactSizeMatch) {
225   void* ptr = root()->Alloc(kSmallSize, "");
226   ASSERT_TRUE(ptr);
227 
228   // There is a cache.
229   auto* tcache = root()->thread_cache_for_testing();
230   EXPECT_TRUE(tcache);
231 
232   uint16_t index = SizeToIndex(kSmallSize);
233   EXPECT_EQ(kFillCountForSmallBucket - 1,
234             tcache->bucket_count_for_testing(index));
235 
236   root()->Free(ptr);
237   // Freeing fills the thread cache.
238   EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
239 
240   void* ptr2 = root()->Alloc(kSmallSize + 1, "");
241   // MTE-untag, because Free() changes tag.
242   EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
243   // Allocated from the thread cache.
244   EXPECT_EQ(kFillCountForSmallBucket - 1,
245             tcache->bucket_count_for_testing(index));
246   root()->Free(ptr2);
247 }
248 
249 TEST_P(PartitionAllocThreadCacheTest, MultipleObjectsCachedPerBucket) {
250   auto* tcache = root()->thread_cache_for_testing();
251   DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
252   size_t bucket_index =
253       FillThreadCacheAndReturnIndex(kMediumSize, kFillCountForMediumBucket + 2);
254   EXPECT_EQ(2 * kFillCountForMediumBucket,
255             tcache->bucket_count_for_testing(bucket_index));
256   // 2 batches, since there were more than |kFillCountForMediumBucket|
257   // allocations.
258   EXPECT_EQ(2u, batch_fill_counter.Delta());
259 }
260 
261 TEST_P(PartitionAllocThreadCacheTest, ObjectsCachedCountIsLimited) {
262   size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, 1000);
263   auto* tcache = root()->thread_cache_for_testing();
264   EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
265 }
266 
267 TEST_P(PartitionAllocThreadCacheTest, Purge) {
268   size_t allocations = 10;
269   size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, allocations);
270   auto* tcache = root()->thread_cache_for_testing();
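  // The cache is refilled in whole batches, so the bucket is expected to hold
  // the smallest multiple of kFillCountForMediumBucket that is strictly
  // greater than |allocations| (for instance, 10 allocations with a
  // hypothetical fill count of 8 would leave 16 cached entries).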
271   EXPECT_EQ(
272       (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket,
273       tcache->bucket_count_for_testing(bucket_index));
274   tcache->Purge();
275   EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
276 }
277 
278 TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
279   PartitionAllocatorForTesting allocator;
280   allocator.init({
281       PartitionOptions::AlignedAlloc::kAllowed,
282       PartitionOptions::ThreadCache::kDisabled,
283       PartitionOptions::Quarantine::kAllowed,
284       PartitionOptions::Cookie::kDisallowed,
285       PartitionOptions::BackupRefPtr::kDisabled,
286       PartitionOptions::BackupRefPtrZapping::kDisabled,
287       PartitionOptions::UseConfigurablePool::kNo,
288   });
289 
290   size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
291   void* ptr = allocator.root()->Alloc(kSmallSize, "");
292   ASSERT_TRUE(ptr);
293 
294   auto* tcache = root()->thread_cache_for_testing();
295   EXPECT_EQ(kFillCountForSmallBucket,
296             tcache->bucket_count_for_testing(bucket_index));
297 
298   ThreadSafePartitionRoot::Free(ptr);
299   EXPECT_EQ(kFillCountForSmallBucket,
300             tcache->bucket_count_for_testing(bucket_index));
301 }
302 
303 // Required to record hits and misses.
304 #if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
305 TEST_P(PartitionAllocThreadCacheTest, LargeAllocationsAreNotCached) {
306   auto* tcache = root()->thread_cache_for_testing();
307   DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
308   DeltaCounter alloc_miss_too_large_counter{
309       tcache->stats_.alloc_miss_too_large};
310   DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
311   DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
312 
313   FillThreadCacheAndReturnIndex(100 * 1024);
314   tcache = root()->thread_cache_for_testing();
315   EXPECT_EQ(1u, alloc_miss_counter.Delta());
316   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
317   EXPECT_EQ(1u, cache_fill_counter.Delta());
318   EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
319 }
320 #endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
321 
322 TEST_P(PartitionAllocThreadCacheTest, DirectMappedAllocationsAreNotCached) {
323   FillThreadCacheAndReturnIndex(1024 * 1024);
324   // The line above would crash due to out of bounds access if this wasn't
325   // properly handled.
326 }
327 
328 // This tests that Realloc properly handles bookkeeping, specifically the path
329 // that reallocates in place.
330 TEST_P(PartitionAllocThreadCacheTest, DirectMappedReallocMetrics) {
331   root()->ResetBookkeepingForTesting();
332 
333   size_t expected_allocated_size = root()->get_total_size_of_allocated_bytes();
334 
335   EXPECT_EQ(expected_allocated_size,
336             root()->get_total_size_of_allocated_bytes());
337   EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
338 
339   void* ptr = root()->Alloc(10 * internal::kMaxBucketed, "");
340 
341   EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
342             root()->get_total_size_of_allocated_bytes());
343 
344   void* ptr2 = root()->Realloc(ptr, 9 * internal::kMaxBucketed, "");
345 
346   ASSERT_EQ(ptr, ptr2);
347   EXPECT_EQ(expected_allocated_size + 9 * internal::kMaxBucketed,
348             root()->get_total_size_of_allocated_bytes());
349 
350   ptr2 = root()->Realloc(ptr, 10 * internal::kMaxBucketed, "");
351 
352   ASSERT_EQ(ptr, ptr2);
353   EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
354             root()->get_total_size_of_allocated_bytes());
355 
356   root()->Free(ptr);
357 }
358 
359 namespace {
360 
361 size_t FillThreadCacheAndReturnIndex(ThreadSafePartitionRoot* root,
362                                      size_t size,
363                                      BucketDistribution bucket_distribution,
364                                      size_t count = 1) {
365   uint16_t bucket_index =
366       PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
367           size, bucket_distribution);
368   std::vector<void*> allocated_data;
369 
370   for (size_t i = 0; i < count; ++i) {
371     allocated_data.push_back(root->Alloc(size, ""));
372   }
373   for (void* ptr : allocated_data) {
374     root->Free(ptr);
375   }
376 
377   return bucket_index;
378 }
379 
380 // TODO(1151236): To remove callback from partition allocator's DEPS,
381 // rewrite the tests without BindLambdaForTesting and RepeatingClosure.
382 // However, this makes it a little annoying to add more tests using their
383 // own threads. Need to support an easier way to implement tests using
384 // PlatformThreadForTesting::Create().
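// The multi-threaded tests below share one pattern: a
// PlatformThreadForTesting::Delegate whose ThreadMain() performs the
// other-thread side of the test, which the test body starts with Create()
// and waits for with Join().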
385 class ThreadDelegateForMultipleThreadCaches
386     : public internal::base::PlatformThreadForTesting::Delegate {
387  public:
388   ThreadDelegateForMultipleThreadCaches(ThreadCache* parent_thread_cache,
389                                         ThreadSafePartitionRoot* root,
390                                         BucketDistribution bucket_distribution)
391       : parent_thread_tcache_(parent_thread_cache),
392         root_(root),
393         bucket_distribution_(bucket_distribution) {}
394 
395   void ThreadMain() override {
396     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
397     FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
398     auto* tcache = root_->thread_cache_for_testing();
399     EXPECT_TRUE(tcache);
400 
401     EXPECT_NE(parent_thread_tcache_, tcache);
402   }
403 
404  private:
405   ThreadCache* parent_thread_tcache_ = nullptr;
406   ThreadSafePartitionRoot* root_ = nullptr;
407   PartitionRoot<internal::ThreadSafe>::BucketDistribution bucket_distribution_;
408 };
409 
410 }  // namespace
411 
412 TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCaches) {
413   FillThreadCacheAndReturnIndex(kMediumSize);
414   auto* parent_thread_tcache = root()->thread_cache_for_testing();
415   ASSERT_TRUE(parent_thread_tcache);
416 
417   ThreadDelegateForMultipleThreadCaches delegate(parent_thread_tcache, root(),
418                                                  GetParam());
419 
420   internal::base::PlatformThreadHandle thread_handle;
421   internal::base::PlatformThreadForTesting::Create(0, &delegate,
422                                                    &thread_handle);
423   internal::base::PlatformThreadForTesting::Join(thread_handle);
424 }
425 
426 namespace {
427 
428 class ThreadDelegateForThreadCacheReclaimedWhenThreadExits
429     : public internal::base::PlatformThreadForTesting::Delegate {
430  public:
431   ThreadDelegateForThreadCacheReclaimedWhenThreadExits(
432       ThreadSafePartitionRoot* root,
433       void*& other_thread_ptr)
434       : root_(root), other_thread_ptr_(other_thread_ptr) {}
435 
436   void ThreadMain() override {
437     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
438     other_thread_ptr_ = root_->Alloc(kMediumSize, "");
439     root_->Free(other_thread_ptr_);
440     // |other_thread_ptr| is now in the thread cache.
441   }
442 
443  private:
444   ThreadSafePartitionRoot* root_ = nullptr;
445   void*& other_thread_ptr_;
446 };
447 
448 }  // namespace
449 
450 TEST_P(PartitionAllocThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
451   // Make sure that there is always at least one object allocated in the test
452   // bucket, so that the PartitionPage is not reclaimed.
453   //
454   // Allocate enough objects to force a cache fill at the next allocation.
455   std::vector<void*> tmp;
456   for (size_t i = 0; i < kDefaultCountForMediumBucket / 4; i++) {
457     tmp.push_back(root()->Alloc(kMediumSize, ""));
458   }
459 
460   void* other_thread_ptr = nullptr;
461   ThreadDelegateForThreadCacheReclaimedWhenThreadExits delegate(
462       root(), other_thread_ptr);
463 
464   internal::base::PlatformThreadHandle thread_handle;
465   internal::base::PlatformThreadForTesting::Create(0, &delegate,
466                                                    &thread_handle);
467   internal::base::PlatformThreadForTesting::Join(thread_handle);
468 
469   void* this_thread_ptr = root()->Alloc(kMediumSize, "");
470   // |other_thread_ptr| was returned to the central allocator, and is returned
471   // here, as it comes from the freelist.
472   EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
473   root()->Free(other_thread_ptr);
474 
475   for (void* ptr : tmp) {
476     root()->Free(ptr);
477   }
478 }
479 
480 namespace {
481 
482 class ThreadDelegateForThreadCacheRegistry
483     : public internal::base::PlatformThreadForTesting::Delegate {
484  public:
485   ThreadDelegateForThreadCacheRegistry(ThreadCache* parent_thread_cache,
486                                        ThreadSafePartitionRoot* root,
487                                        BucketDistribution bucket_distribution)
488       : parent_thread_tcache_(parent_thread_cache),
489         root_(root),
490         bucket_distribution_(bucket_distribution) {}
491 
492   void ThreadMain() override {
493     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
494     FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
495     auto* tcache = root_->thread_cache_for_testing();
496     EXPECT_TRUE(tcache);
497 
498     internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
499     EXPECT_EQ(tcache->prev_for_testing(), nullptr);
500     EXPECT_EQ(tcache->next_for_testing(), parent_thread_tcache_);
501   }
502 
503  private:
504   ThreadCache* parent_thread_tcache_ = nullptr;
505   ThreadSafePartitionRoot* root_ = nullptr;
506   BucketDistribution bucket_distribution_;
507 };
508 
509 }  // namespace
510 
511 TEST_P(PartitionAllocThreadCacheTest, ThreadCacheRegistry) {
512   auto* parent_thread_tcache = root()->thread_cache_for_testing();
513   ASSERT_TRUE(parent_thread_tcache);
514 
515   {
516     internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
517     EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
518     EXPECT_EQ(parent_thread_tcache->next_, nullptr);
519   }
520 
521   ThreadDelegateForThreadCacheRegistry delegate(parent_thread_tcache, root(),
522                                                 GetParam());
523 
524   internal::base::PlatformThreadHandle thread_handle;
525   internal::base::PlatformThreadForTesting::Create(0, &delegate,
526                                                    &thread_handle);
527   internal::base::PlatformThreadForTesting::Join(thread_handle);
528 
529   internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
530   EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
531   EXPECT_EQ(parent_thread_tcache->next_, nullptr);
532 }
533 
534 #if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
535 TEST_P(PartitionAllocThreadCacheTest, RecordStats) {
536   auto* tcache = root()->thread_cache_for_testing();
537   DeltaCounter alloc_counter{tcache->stats_.alloc_count};
538   DeltaCounter alloc_hits_counter{tcache->stats_.alloc_hits};
539   DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
540 
541   DeltaCounter alloc_miss_empty_counter{tcache->stats_.alloc_miss_empty};
542 
543   DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
544   DeltaCounter cache_fill_hits_counter{tcache->stats_.cache_fill_hits};
545   DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
546 
547   // Cache has been purged, first allocation is a miss.
548   void* data = root()->Alloc(kMediumSize, "");
549   EXPECT_EQ(1u, alloc_counter.Delta());
550   EXPECT_EQ(1u, alloc_miss_counter.Delta());
551   EXPECT_EQ(0u, alloc_hits_counter.Delta());
552 
553   // Cache fill worked.
554   root()->Free(data);
555   EXPECT_EQ(1u, cache_fill_counter.Delta());
556   EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
557   EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
558 
559   tcache->Purge();
560   cache_fill_counter.Reset();
561   // Buckets are never full, fill always succeeds.
562   size_t allocations = 10;
563   size_t bucket_index = FillThreadCacheAndReturnIndex(
564       kMediumSize, kDefaultCountForMediumBucket + allocations);
565   EXPECT_EQ(kDefaultCountForMediumBucket + allocations,
566             cache_fill_counter.Delta());
567   EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
568 
569   // Memory footprint.
570   ThreadCacheStats stats;
571   ThreadCacheRegistry::Instance().DumpStats(true, &stats);
572   // Bucket was cleared (set to kDefaultCountForMediumBucket / 2) after going
573   // above the limit (-1), then refilled by batches (1 + floor(allocations /
574   // kFillCountForMediumBucket) times).
575   size_t expected_count =
576       kDefaultCountForMediumBucket / 2 - 1 +
577       (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket;
578   EXPECT_EQ(root()->buckets[bucket_index].slot_size * expected_count,
579             stats.bucket_total_memory);
580   EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
581 }
582 
583 namespace {
584 
585 class ThreadDelegateForMultipleThreadCachesAccounting
586     : public internal::base::PlatformThreadForTesting::Delegate {
587  public:
588   ThreadDelegateForMultipleThreadCachesAccounting(
589       ThreadSafePartitionRoot* root,
590       int alloc_count,
591       BucketDistribution bucket_distribution)
592       : root_(root),
593         bucket_distribution_(bucket_distribution),
594         alloc_count_(alloc_count) {}
595 
596   void ThreadMain() override {
597     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
598     size_t bucket_index =
599         FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
600 
601     ThreadCacheStats stats;
602     ThreadCacheRegistry::Instance().DumpStats(false, &stats);
603     // 2x: one for this thread and one for the parent.
604     EXPECT_EQ(
605         2 * root_->buckets[bucket_index].slot_size * kFillCountForMediumBucket,
606         stats.bucket_total_memory);
607     EXPECT_EQ(2 * sizeof(ThreadCache), stats.metadata_overhead);
608 
609     ThreadCacheStats this_thread_cache_stats{};
610     root_->thread_cache_for_testing()->AccumulateStats(
611         &this_thread_cache_stats);
612     EXPECT_EQ(alloc_count_ + this_thread_cache_stats.alloc_count,
613               stats.alloc_count);
614   }
615 
616  private:
617   ThreadSafePartitionRoot* root_ = nullptr;
618   BucketDistribution bucket_distribution_;
619   const int alloc_count_;
620 };
621 
622 }  // namespace
623 
624 TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCachesAccounting) {
625   FillThreadCacheAndReturnIndex(kMediumSize);
626   uint64_t alloc_count = root()->thread_cache_for_testing()->stats_.alloc_count;
627 
628   ThreadDelegateForMultipleThreadCachesAccounting delegate(root(), alloc_count,
629                                                            GetParam());
630 
631   internal::base::PlatformThreadHandle thread_handle;
632   internal::base::PlatformThreadForTesting::Create(0, &delegate,
633                                                    &thread_handle);
634   internal::base::PlatformThreadForTesting::Join(thread_handle);
635 }
636 
637 #endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
638 
639 // TODO(https://crbug.com/1287799): Flaky on IOS.
640 #if BUILDFLAG(IS_IOS)
641 #define MAYBE_PurgeAll DISABLED_PurgeAll
642 #else
643 #define MAYBE_PurgeAll PurgeAll
644 #endif
645 
646 namespace {
647 
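// Delegate for the PurgeAll test: fills this thread's cache, signals the main
// thread, then waits for it to call PurgeAll(). As the expectations in
// ThreadMain() check, a PurgeAll() issued from another thread does not empty
// this cache immediately; the purge only happens on this thread's next
// deallocation.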
648 class ThreadDelegateForPurgeAll
649     : public internal::base::PlatformThreadForTesting::Delegate {
650  public:
651   ThreadDelegateForPurgeAll(ThreadSafePartitionRoot* root,
652                             ThreadCache*& other_thread_tcache,
653                             std::atomic<bool>& other_thread_started,
654                             std::atomic<bool>& purge_called,
655                             int bucket_index,
656                             BucketDistribution bucket_distribution)
657       : root_(root),
658         other_thread_tcache_(other_thread_tcache),
659         other_thread_started_(other_thread_started),
660         purge_called_(purge_called),
661         bucket_index_(bucket_index),
662         bucket_distribution_(bucket_distribution) {}
663 
664   void ThreadMain() override PA_NO_THREAD_SAFETY_ANALYSIS {
665     FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
666     other_thread_tcache_ = root_->thread_cache_for_testing();
667 
668     other_thread_started_.store(true, std::memory_order_release);
669     while (!purge_called_.load(std::memory_order_acquire)) {
670     }
671 
672     // Purge() was not triggered from the other thread.
673     EXPECT_EQ(kFillCountForSmallBucket,
674               other_thread_tcache_->bucket_count_for_testing(bucket_index_));
675     // Allocations do not trigger Purge().
676     void* data = root_->Alloc(kSmallSize, "");
677     EXPECT_EQ(kFillCountForSmallBucket - 1,
678               other_thread_tcache_->bucket_count_for_testing(bucket_index_));
679     // But deallocations do.
680     root_->Free(data);
681     EXPECT_EQ(0u,
682               other_thread_tcache_->bucket_count_for_testing(bucket_index_));
683   }
684 
685  private:
686   ThreadSafePartitionRoot* root_ = nullptr;
687   ThreadCache*& other_thread_tcache_;
688   std::atomic<bool>& other_thread_started_;
689   std::atomic<bool>& purge_called_;
690   const int bucket_index_;
691   BucketDistribution bucket_distribution_;
692 };
693 
694 }  // namespace
695 
696 TEST_P(PartitionAllocThreadCacheTest, MAYBE_PurgeAll)
697 PA_NO_THREAD_SAFETY_ANALYSIS {
698   std::atomic<bool> other_thread_started{false};
699   std::atomic<bool> purge_called{false};
700 
701   size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
702   ThreadCache* this_thread_tcache = root()->thread_cache_for_testing();
703   ThreadCache* other_thread_tcache = nullptr;
704 
705   ThreadDelegateForPurgeAll delegate(root(), other_thread_tcache,
706                                      other_thread_started, purge_called,
707                                      bucket_index, GetParam());
708   internal::base::PlatformThreadHandle thread_handle;
709   internal::base::PlatformThreadForTesting::Create(0, &delegate,
710                                                    &thread_handle);
711 
712   while (!other_thread_started.load(std::memory_order_acquire)) {
713   }
714 
715   EXPECT_EQ(kFillCountForSmallBucket,
716             this_thread_tcache->bucket_count_for_testing(bucket_index));
717   EXPECT_EQ(kFillCountForSmallBucket,
718             other_thread_tcache->bucket_count_for_testing(bucket_index));
719 
720   ThreadCacheRegistry::Instance().PurgeAll();
721   // This thread is synchronously purged.
722   EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
723   // Not the other one.
724   EXPECT_EQ(kFillCountForSmallBucket,
725             other_thread_tcache->bucket_count_for_testing(bucket_index));
726 
727   purge_called.store(true, std::memory_order_release);
728   internal::base::PlatformThreadForTesting::Join(thread_handle);
729 }
730 
731 TEST_P(PartitionAllocThreadCacheTest, PeriodicPurge) {
732   auto& registry = ThreadCacheRegistry::Instance();
733   auto NextInterval = [&registry]() {
734     return internal::base::Microseconds(
735         registry.GetPeriodicPurgeNextIntervalInMicroseconds());
736   };
737 
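  // RunPeriodicPurge() adapts the next purge interval to how much memory the
  // thread caches hold: with little cached memory the interval keeps doubling
  // (clamped at kMaxPurgeInterval), and with several times
  // kMinCachedMemoryForPurging it shrinks again, as the steps below verify.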
738   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
739 
740   // Small amount of memory, the period gets longer.
741   auto* tcache = ThreadCache::Get();
742   ASSERT_LT(tcache->CachedMemory(),
743             ThreadCacheRegistry::kMinCachedMemoryForPurging);
744   registry.RunPeriodicPurge();
745   EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
746   registry.RunPeriodicPurge();
747   EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
748 
749   // Check that the purge interval is clamped at the maximum value.
750   while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
751     registry.RunPeriodicPurge();
752   }
753   registry.RunPeriodicPurge();
754 
755   // Not enough memory to decrease the interval.
756   FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging +
757                             1);
758   registry.RunPeriodicPurge();
759   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
760 
761   FillThreadCacheWithMemory(
762       2 * ThreadCacheRegistry::kMinCachedMemoryForPurging + 1);
763   registry.RunPeriodicPurge();
764   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
765 
766   // Enough memory, interval doesn't change.
767   FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging);
768   registry.RunPeriodicPurge();
769   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
770 
771   // No cached memory, increase the interval.
772   registry.RunPeriodicPurge();
773   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
774 
775   // The very large size cannot be tested with only one thread; it is covered
776   // below by the multiple-threads test.
777 }
778 
779 namespace {
780 
781 void FillThreadCacheWithMemory(ThreadSafePartitionRoot* root,
782                                size_t target_cached_memory,
783                                BucketDistribution bucket_distribution) {
784   for (int batch : {1, 2, 4, 8, 16}) {
785     for (size_t allocation_size = 1;
786          allocation_size <= ThreadCache::kLargeSizeThreshold;
787          allocation_size++) {
788       FillThreadCacheAndReturnIndex(root, allocation_size, bucket_distribution,
789                                     batch);
790 
791       if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
792         return;
793       }
794     }
795   }
796 
797   ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
798 }
799 
800 class ThreadDelegateForPeriodicPurgeSumsOverAllThreads
801     : public internal::base::PlatformThreadForTesting::Delegate {
802  public:
803   ThreadDelegateForPeriodicPurgeSumsOverAllThreads(
804       ThreadSafePartitionRoot* root,
805       std::atomic<int>& allocations_done,
806       std::atomic<bool>& can_finish,
807       BucketDistribution bucket_distribution)
808       : root_(root),
809         allocations_done_(allocations_done),
810         can_finish_(can_finish),
811         bucket_distribution_(bucket_distribution) {}
812 
813   void ThreadMain() override {
814     FillThreadCacheWithMemory(
815         root_, 5 * ThreadCacheRegistry::kMinCachedMemoryForPurging,
816         bucket_distribution_);
817     allocations_done_.fetch_add(1, std::memory_order_release);
818 
819     // This thread needs to be alive when the next periodic purge task runs.
820     while (!can_finish_.load(std::memory_order_acquire)) {
821     }
822   }
823 
824  private:
825   ThreadSafePartitionRoot* root_ = nullptr;
826   std::atomic<int>& allocations_done_;
827   std::atomic<bool>& can_finish_;
828   BucketDistribution bucket_distribution_;
829 };
830 
831 }  // namespace
832 
833 // Disabled due to flakiness: crbug.com/1220371
834 TEST_P(PartitionAllocThreadCacheTest,
835        DISABLED_PeriodicPurgeSumsOverAllThreads) {
836   auto& registry = ThreadCacheRegistry::Instance();
837   auto NextInterval = [&registry]() {
838     return internal::base::Microseconds(
839         registry.GetPeriodicPurgeNextIntervalInMicroseconds());
840   };
841   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
842 
843   // Small amount of memory, the period gets longer.
844   auto* tcache = ThreadCache::Get();
845   ASSERT_LT(tcache->CachedMemory(),
846             ThreadCacheRegistry::kMinCachedMemoryForPurging);
847   registry.RunPeriodicPurge();
848   EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
849   registry.RunPeriodicPurge();
850   EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
851 
852   // Check that the purge interval is clamped at the maximum value.
853   while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
854     registry.RunPeriodicPurge();
855   }
856   registry.RunPeriodicPurge();
857 
858   // Not enough memory on this thread to decrease the interval.
859   FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging /
860                             2);
861   registry.RunPeriodicPurge();
862   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
863 
864   std::atomic<int> allocations_done{0};
865   std::atomic<bool> can_finish{false};
866   ThreadDelegateForPeriodicPurgeSumsOverAllThreads delegate(
867       root(), allocations_done, can_finish, GetParam());
868 
869   internal::base::PlatformThreadHandle thread_handle;
870   internal::base::PlatformThreadForTesting::Create(0, &delegate,
871                                                    &thread_handle);
872   internal::base::PlatformThreadHandle thread_handle_2;
873   internal::base::PlatformThreadForTesting::Create(0, &delegate,
874                                                    &thread_handle_2);
875 
876   while (allocations_done.load(std::memory_order_acquire) != 2) {
877     internal::base::PlatformThreadForTesting::YieldCurrentThread();
878   }
879 
880   // Many allocations on the other thread.
881   registry.RunPeriodicPurge();
882   EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
883 
884   can_finish.store(true, std::memory_order_release);
885   internal::base::PlatformThreadForTesting::Join(thread_handle);
886   internal::base::PlatformThreadForTesting::Join(thread_handle_2);
887 }
888 
889 // TODO(https://crbug.com/1287799): Flaky on IOS.
890 #if BUILDFLAG(IS_IOS)
891 #define MAYBE_DynamicCountPerBucket DISABLED_DynamicCountPerBucket
892 #else
893 #define MAYBE_DynamicCountPerBucket DynamicCountPerBucket
894 #endif
895 TEST_P(PartitionAllocThreadCacheTest, MAYBE_DynamicCountPerBucket) {
896   auto* tcache = root()->thread_cache_for_testing();
897   size_t bucket_index =
898       FillThreadCacheAndReturnIndex(kMediumSize, kDefaultCountForMediumBucket);
899 
900   EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
901 
902   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
903       ThreadCache::kDefaultMultiplier / 2);
904   // No immediate batch deallocation.
905   EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
906   void* data = root()->Alloc(kMediumSize, "");
907   // Not triggered by allocations.
908   EXPECT_EQ(kDefaultCountForMediumBucket - 1,
909             tcache->buckets_[bucket_index].count);
910 
911   // Free() triggers the purge within limits.
912   root()->Free(data);
913   EXPECT_LE(tcache->buckets_[bucket_index].count,
914             kDefaultCountForMediumBucket / 2);
915 
916   // Won't go above anymore.
917   FillThreadCacheAndReturnIndex(kMediumSize, 1000);
918   EXPECT_LE(tcache->buckets_[bucket_index].count,
919             kDefaultCountForMediumBucket / 2);
920 
921   // Limit can be raised.
922   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
923       ThreadCache::kDefaultMultiplier * 2);
924   FillThreadCacheAndReturnIndex(kMediumSize, 1000);
925   EXPECT_GT(tcache->buckets_[bucket_index].count,
926             kDefaultCountForMediumBucket / 2);
927 }
928 
929 TEST_P(PartitionAllocThreadCacheTest, DynamicCountPerBucketClamping) {
930   auto* tcache = root()->thread_cache_for_testing();
931 
932   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
933       ThreadCache::kDefaultMultiplier / 1000.);
934   for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
935     // Invalid bucket.
936     if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
937       EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
938       continue;
939     }
940     EXPECT_GE(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 1u);
941   }
942 
943   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
944       ThreadCache::kDefaultMultiplier * 1000.);
945   for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
946     // Invalid bucket.
947     if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
948       EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
949       continue;
950     }
951     EXPECT_LT(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 0xff);
952   }
953 }
954 
955 // TODO(https://crbug.com/1287799): Flaky on IOS.
956 #if BUILDFLAG(IS_IOS)
957 #define MAYBE_DynamicCountPerBucketMultipleThreads \
958   DISABLED_DynamicCountPerBucketMultipleThreads
959 #else
960 #define MAYBE_DynamicCountPerBucketMultipleThreads \
961   DynamicCountPerBucketMultipleThreads
962 #endif
963 
964 namespace {
965 
966 class ThreadDelegateForDynamicCountPerBucketMultipleThreads
967     : public internal::base::PlatformThreadForTesting::Delegate {
968  public:
969   ThreadDelegateForDynamicCountPerBucketMultipleThreads(
970       ThreadSafePartitionRoot* root,
971       std::atomic<bool>& other_thread_started,
972       std::atomic<bool>& threshold_changed,
973       int bucket_index,
974       BucketDistribution bucket_distribution)
975       : root_(root),
976         other_thread_started_(other_thread_started),
977         threshold_changed_(threshold_changed),
978         bucket_index_(bucket_index),
979         bucket_distribution_(bucket_distribution) {}
980 
981   void ThreadMain() override {
982     FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_,
983                                   kDefaultCountForSmallBucket + 10);
984     auto* this_thread_tcache = root_->thread_cache_for_testing();
985     // More than the default since the multiplier has changed.
986     EXPECT_GT(this_thread_tcache->bucket_count_for_testing(bucket_index_),
987               kDefaultCountForSmallBucket + 10);
988 
989     other_thread_started_.store(true, std::memory_order_release);
990     while (!threshold_changed_.load(std::memory_order_acquire)) {
991     }
992 
993     void* data = root_->Alloc(kSmallSize, "");
994     // Deallocations trigger limit enforcement.
995     root_->Free(data);
996     // Since the bucket is too full, it gets halved by batched deallocation.
997     EXPECT_EQ(static_cast<uint8_t>(ThreadCache::kSmallBucketBaseCount / 2),
998               this_thread_tcache->bucket_count_for_testing(bucket_index_));
999   }
1000 
1001  private:
1002   ThreadSafePartitionRoot* root_ = nullptr;
1003   std::atomic<bool>& other_thread_started_;
1004   std::atomic<bool>& threshold_changed_;
1005   const int bucket_index_;
1006   PartitionRoot<internal::ThreadSafe>::BucketDistribution bucket_distribution_;
1007 };
1008 
1009 }  // namespace
1010 
1011 TEST_P(PartitionAllocThreadCacheTest,
1012        MAYBE_DynamicCountPerBucketMultipleThreads) {
1013   std::atomic<bool> other_thread_started{false};
1014   std::atomic<bool> threshold_changed{false};
1015 
1016   auto* tcache = root()->thread_cache_for_testing();
1017   size_t bucket_index =
1018       FillThreadCacheAndReturnIndex(kSmallSize, kDefaultCountForSmallBucket);
1019   EXPECT_EQ(kDefaultCountForSmallBucket, tcache->buckets_[bucket_index].count);
1020 
1021   // Change the ratio before starting the threads, checking that it will be
1022   // applied to newly-created threads.
1023   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1024       ThreadCache::kDefaultMultiplier + 1);
1025 
1026   ThreadDelegateForDynamicCountPerBucketMultipleThreads delegate(
1027       root(), other_thread_started, threshold_changed, bucket_index,
1028       GetParam());
1029 
1030   internal::base::PlatformThreadHandle thread_handle;
1031   internal::base::PlatformThreadForTesting::Create(0, &delegate,
1032                                                    &thread_handle);
1033 
1034   while (!other_thread_started.load(std::memory_order_acquire)) {
1035   }
1036 
1037   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(1.);
1038   threshold_changed.store(true, std::memory_order_release);
1039 
1040   internal::base::PlatformThreadForTesting::Join(thread_handle);
1041 }
1042 
1043 TEST_P(PartitionAllocThreadCacheTest, DynamicSizeThreshold) {
1044   auto* tcache = root()->thread_cache_for_testing();
1045   DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
1046   DeltaCounter alloc_miss_too_large_counter{
1047       tcache->stats_.alloc_miss_too_large};
1048   DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
1049   DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
1050 
1051   // Default threshold at first.
1052   ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1053   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold);
1054 
1055   EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
1056   EXPECT_EQ(1u, cache_fill_counter.Delta());
1057 
1058   // Too large to be cached.
1059   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1060   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1061 
1062   // Increase.
1063   ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
1064   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1065   // No new miss.
1066   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1067 
1068   // Lower.
1069   ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1070   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1071   EXPECT_EQ(2u, alloc_miss_too_large_counter.Delta());
1072 
1073   // Value is clamped.
1074   size_t too_large = 1024 * 1024;
1075   ThreadCache::SetLargestCachedSize(too_large);
1076   FillThreadCacheAndReturnIndex(too_large);
1077   EXPECT_EQ(3u, alloc_miss_too_large_counter.Delta());
1078 }
1079 
1080 // Disabled due to flakiness: crbug.com/1287811
1081 TEST_P(PartitionAllocThreadCacheTest, DISABLED_DynamicSizeThresholdPurge) {
1082   auto* tcache = root()->thread_cache_for_testing();
1083   DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
1084   DeltaCounter alloc_miss_too_large_counter{
1085       tcache->stats_.alloc_miss_too_large};
1086   DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
1087   DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
1088 
1089   // Cache large allocations.
1090   size_t large_allocation_size = ThreadCache::kLargeSizeThreshold;
1091   ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
1092   size_t index = FillThreadCacheAndReturnIndex(large_allocation_size);
1093   EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
1094 
1095   // Lower.
1096   ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1097   FillThreadCacheAndReturnIndex(large_allocation_size);
1098   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1099 
1100   // There is memory trapped in the cache bucket.
1101   EXPECT_GT(tcache->buckets_[index].count, 0u);
1102 
1103   // Which is reclaimed by Purge().
1104   tcache->Purge();
1105   EXPECT_EQ(0u, tcache->buckets_[index].count);
1106 }
1107 
1108 TEST_P(PartitionAllocThreadCacheTest, ClearFromTail) {
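  // ClearBucket(bucket, limit) is expected to trim entries from the tail of
  // the bucket's freelist until only |limit| remain, leaving the head entry
  // in place; count_items() below walks the freelist to check the remaining
  // length after each call.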
1109   auto count_items = [](ThreadCache* tcache, size_t index) {
1110     uint8_t count = 0;
1111     auto* head = tcache->buckets_[index].freelist_head;
1112     while (head) {
1113       head =
1114           head->GetNextForThreadCache<true>(tcache->buckets_[index].slot_size);
1115       count++;
1116     }
1117     return count;
1118   };
1119 
1120   auto* tcache = root()->thread_cache_for_testing();
1121   size_t index = FillThreadCacheAndReturnIndex(kSmallSize, 10);
1122   ASSERT_GE(count_items(tcache, index), 10);
1123   void* head = tcache->buckets_[index].freelist_head;
1124 
1125   for (size_t limit : {8, 3, 1}) {
1126     tcache->ClearBucket(tcache->buckets_[index], limit);
1127     EXPECT_EQ(head, static_cast<void*>(tcache->buckets_[index].freelist_head));
1128     EXPECT_EQ(count_items(tcache, index), limit);
1129   }
1130   tcache->ClearBucket(tcache->buckets_[index], 0);
1131   EXPECT_EQ(nullptr, static_cast<void*>(tcache->buckets_[index].freelist_head));
1132 }
1133 
1134 // TODO(https://crbug.com/1287799): Flaky on IOS.
1135 #if BUILDFLAG(IS_IOS)
1136 #define MAYBE_Bookkeeping DISABLED_Bookkeeping
1137 #else
1138 #define MAYBE_Bookkeeping Bookkeeping
1139 #endif
1140 TEST_P(PartitionAllocThreadCacheTest, MAYBE_Bookkeeping) {
1141   void* arr[kFillCountForMediumBucket] = {};
1142   auto* tcache = root()->thread_cache_for_testing();
1143 
1144   root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
1145                       PurgeFlags::kDiscardUnusedSystemPages);
1146   root()->ResetBookkeepingForTesting();
1147 
1148   // The ThreadCache is allocated before we change buckets, so its size is
1149   // always based on the sparser distribution.
1150   size_t tc_bucket_index = root()->SizeToBucketIndex(
1151       sizeof(ThreadCache),
1152       ThreadSafePartitionRoot::BucketDistribution::kDefault);
1153   auto* tc_bucket = &root()->buckets[tc_bucket_index];
1154   size_t expected_allocated_size =
1155       tc_bucket->slot_size;  // For the ThreadCache itself.
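  // With lazy commit, only the first system page of the slot span is expected
  // to be committed up front; otherwise the whole span is committed at once.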
1156   size_t expected_committed_size = kUseLazyCommit
1157                                        ? internal::SystemPageSize()
1158                                        : tc_bucket->get_bytes_per_span();
1159 
1160   EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
1161   EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
1162   EXPECT_EQ(expected_allocated_size,
1163             root()->get_total_size_of_allocated_bytes());
1164   EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
1165 
1166   void* ptr = root()->Alloc(kMediumSize, "");
1167 
1168   auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
1169   size_t medium_alloc_size = medium_bucket->slot_size;
1170   expected_allocated_size += medium_alloc_size;
1171   expected_committed_size += kUseLazyCommit
1172                                  ? internal::SystemPageSize()
1173                                  : medium_bucket->get_bytes_per_span();
1174 
1175   EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
1176   EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
1177   EXPECT_EQ(expected_allocated_size,
1178             root()->get_total_size_of_allocated_bytes());
1179   EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
1180 
1181   expected_allocated_size += kFillCountForMediumBucket * medium_alloc_size;
1182 
1183   // These allocations all come from the thread-cache.
1184   for (size_t i = 0; i < kFillCountForMediumBucket; i++) {
1185     arr[i] = root()->Alloc(kMediumSize, "");
1186     EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
1187     EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
1188     EXPECT_EQ(expected_allocated_size,
1189               root()->get_total_size_of_allocated_bytes());
1190     EXPECT_EQ(expected_allocated_size,
1191               root()->get_max_size_of_allocated_bytes());
1192     EXPECT_EQ((kFillCountForMediumBucket - 1 - i) * medium_alloc_size,
1193               tcache->CachedMemory());
1194   }
1195 
1196   EXPECT_EQ(0U, tcache->CachedMemory());
1197 
1198   root()->Free(ptr);
1199 
1200   for (auto*& el : arr) {
1201     root()->Free(el);
1202   }
1203   EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
1204             expected_allocated_size);
1205   tcache->Purge();
1206   EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
1207             GetBucketSizeForThreadCache());
1208 }
1209 
1210 TEST_P(PartitionAllocThreadCacheTest, TryPurgeNoAllocs) {
1211   auto* tcache = root()->thread_cache_for_testing();
1212   tcache->TryPurge();
1213 }
1214 
1215 TEST_P(PartitionAllocThreadCacheTest, TryPurgeMultipleCorrupted) {
1216   auto* tcache = root()->thread_cache_for_testing();
1217 
1218   void* ptr = root()->Alloc(kMediumSize, "");
1219 
1220   auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
1221 
1222   auto* curr = medium_bucket->active_slot_spans_head->get_freelist_head();
1223   curr = curr->GetNextForThreadCache<true>(kMediumSize);
1224   curr->CorruptNextForTesting(0x12345678);
1225   tcache->TryPurge();
1226   curr->SetNext(nullptr);
1227   root()->Free(ptr);
1228 }
1229 
1230 TEST(AlternateBucketDistributionTest, SizeToIndex) {
1231   using internal::BucketIndexLookup;
1232 
1233   // The first 12 buckets are the same as in the default bucket distribution.
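  // Each power-of-two order i contributes four candidate sizes, at i, 1.25*i,
  // 1.5*i and 1.75*i; that is what i * (4 + offset) / 4 enumerates below.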
1234   for (size_t i = 1 << 0; i < 1 << 8; i <<= 1) {
1235     for (size_t offset = 0; offset < 4; offset++) {
1236       size_t n = i * (4 + offset) / 4;
1237       EXPECT_EQ(BucketIndexLookup::GetIndex(n),
1238                 BucketIndexLookup::GetIndexForDefaultBuckets(n));
1239     }
1240   }
1241 
1242   // The alternate bucket distribution is different in the middle values.
1243   //
1244   // For each order, the top two buckets are removed compared with the default
1245   // distribution. Values that would be allocated in those two buckets are
1246   // instead allocated in the next power of two bucket.
1247   //
1248   // The first two buckets (each power of two and the next bucket up) remain
1249   // the same between the two bucket distributions.
  size_t expected_index = BucketIndexLookup::GetIndex(1 << 8);
  for (size_t i = 1 << 8; i < internal::kHighThresholdForAlternateDistribution;
       i <<= 1) {
    // The first two buckets in the order should match up to the normal bucket
    // distribution.
    for (size_t offset = 0; offset < 2; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForDefaultBuckets(n));
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
      expected_index += 2;
    }
    // The last two buckets in the order are "rounded up" to the same bucket
    // as the next power of two.
    expected_index += 4;
    for (size_t offset = 2; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      // These two are rounded up in the alternate distribution, so we expect
      // the bucket index to be larger than the bucket index for the same
      // allocation under the default distribution.
      EXPECT_GT(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForDefaultBuckets(n));
      // We expect both allocations in this loop to be rounded up to the next
      // power of two bucket.
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
    }
  }

  // The rest of the buckets all match up exactly with the existing
  // bucket distribution.
  for (size_t i = internal::kHighThresholdForAlternateDistribution;
       i < internal::kMaxBucketed; i <<= 1) {
    for (size_t offset = 0; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForDefaultBuckets(n));
    }
  }
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecording) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  constexpr size_t kBucketedNotCached = 1 << 12;
  constexpr size_t kDirectMapped = 4 * (1 << 20);
  // Not a "nice" size on purpose, to check that the raw size accounting works.
  const size_t kSingleSlot = internal::PartitionPageSize() + 1;

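  // The four allocations below cover a small thread-cacheable bucket, a
  // bucketed size too large for the thread cache, a direct-mapped allocation,
  // and a single-slot span.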
  size_t expected_total_size = 0;
  void* ptr = root()->Alloc(kSmallSize, "");
  ASSERT_TRUE(ptr);
  expected_total_size += root()->GetUsableSize(ptr);
  void* ptr2 = root()->Alloc(kBucketedNotCached, "");
  ASSERT_TRUE(ptr2);
  expected_total_size += root()->GetUsableSize(ptr2);
  void* ptr3 = root()->Alloc(kDirectMapped, "");
  ASSERT_TRUE(ptr3);
  expected_total_size += root()->GetUsableSize(ptr3);
  void* ptr4 = root()->Alloc(kSingleSlot, "");
  ASSERT_TRUE(ptr4);
  expected_total_size += root()->GetUsableSize(ptr4);

  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);

  root()->Free(ptr);
  root()->Free(ptr2);
  root()->Free(ptr3);
  root()->Free(ptr4);

  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);
  EXPECT_EQ(4u, tcache->thread_alloc_stats().dealloc_count);
  EXPECT_EQ(expected_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);

  auto stats = internal::GetAllocStatsForCurrentThread();
  EXPECT_EQ(4u, stats.alloc_count);
  EXPECT_EQ(expected_total_size, stats.alloc_total_size);
  EXPECT_EQ(4u, stats.dealloc_count);
  EXPECT_EQ(expected_total_size, stats.dealloc_total_size);
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingAligned) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  // Aligned allocations take different paths depending on whether they are (in
  // the same order as the test cases below):
  // - Not really aligned (since alignment is always good enough)
  // - Already satisfied by PA's alignment guarantees
  // - Requiring extra padding
  // - Already satisfied by PA's alignment guarantees
  // - In need of a special slot span (very large alignment)
  // - Direct-mapped with large alignment
  size_t alloc_count = 0;
  size_t total_size = 0;
  size_t size_alignments[][2] = {{128, 4},
                                 {128, 128},
                                 {1024, 128},
                                 {128, 1024},
                                 {128, 2 * internal::PartitionPageSize()},
                                 {(4 << 20) + 1, 1 << 19}};
  for (auto [requested_size, alignment] : size_alignments) {
    void* ptr = root()->AlignedAllocWithFlags(0, alignment, requested_size);
    ASSERT_TRUE(ptr);
    alloc_count++;
    total_size += root()->GetUsableSize(ptr);
    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_size, tcache->thread_alloc_stats().alloc_total_size);
    root()->Free(ptr);
    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_size, tcache->thread_alloc_stats().dealloc_total_size);
  }

  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);

  auto stats = internal::GetAllocStatsForCurrentThread();
  EXPECT_EQ(alloc_count, stats.alloc_count);
  EXPECT_EQ(total_size, stats.alloc_total_size);
  EXPECT_EQ(alloc_count, stats.dealloc_count);
  EXPECT_EQ(total_size, stats.dealloc_total_size);
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingRealloc) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  size_t alloc_count = 0;
  size_t dealloc_count = 0;
  size_t total_alloc_size = 0;
  size_t total_dealloc_size = 0;
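  // The {size, new_size} pairs below cover shrinking and growing
  // reallocations, for both bucketed and direct-mapped sizes.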
  size_t size_new_sizes[][2] = {
      {16, 15},
      {16, 64},
      {16, internal::PartitionPageSize() + 1},
      {4 << 20, 8 << 20},
      {8 << 20, 4 << 20},
      {(8 << 20) - internal::SystemPageSize(), 8 << 20}};
  for (auto [size, new_size] : size_new_sizes) {
    void* ptr = root()->Alloc(size, "");
    ASSERT_TRUE(ptr);
    alloc_count++;
    size_t usable_size = root()->GetUsableSize(ptr);
    total_alloc_size += usable_size;

    ptr = root()->Realloc(ptr, new_size, "");
    ASSERT_TRUE(ptr);
    total_dealloc_size += usable_size;
    dealloc_count++;
    usable_size = root()->GetUsableSize(ptr);
    total_alloc_size += usable_size;
    alloc_count++;

    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_dealloc_size,
              tcache->thread_alloc_stats().dealloc_total_size)
        << new_size;

    root()->Free(ptr);
    dealloc_count++;
    total_dealloc_size += usable_size;

    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_dealloc_size,
              tcache->thread_alloc_stats().dealloc_total_size);
  }
  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, near the start of
// Chrome, once the enabled features are known.
TEST(AlternateBucketDistributionTest, SwitchBeforeAlloc) {
  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
  ThreadSafePartitionRoot* root = allocator->root();

  root->SwitchToDenserBucketDistribution();
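  // Pick a size that maps to different buckets under the two distributions,
  // as verified by the EXPECT_NE below.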
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForDefaultBuckets(n));

  void* ptr = root->Alloc(n, "");

  root->ResetBucketDistributionForTesting();

  root->Free(ptr);
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, near the start of
// Chrome, once the enabled features are known.
TEST(AlternateBucketDistributionTest, SwitchAfterAlloc) {
  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForDefaultBuckets(n));

  ThreadSafePartitionRoot* root = allocator->root();
  void* ptr = root->Alloc(n, "");

  root->SwitchToDenserBucketDistribution();

  void* ptr2 = root->Alloc(n, "");

  root->Free(ptr2);
  root->Free(ptr);
}

}  // namespace partition_alloc

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
        // PA_CONFIG(THREAD_CACHE_SUPPORTED)