1 // Copyright 2020 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "partition_alloc/thread_cache.h"
6 
7 #include <algorithm>
8 #include <atomic>
9 #include <vector>
10 
11 #include "build/build_config.h"
12 #include "partition_alloc/extended_api.h"
13 #include "partition_alloc/partition_address_space.h"
14 #include "partition_alloc/partition_alloc_base/thread_annotations.h"
15 #include "partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
16 #include "partition_alloc/partition_alloc_buildflags.h"
17 #include "partition_alloc/partition_alloc_config.h"
18 #include "partition_alloc/partition_alloc_for_testing.h"
19 #include "partition_alloc/partition_lock.h"
20 #include "partition_alloc/partition_root.h"
21 #include "partition_alloc/tagging.h"
22 #include "testing/gtest/include/gtest/gtest.h"
23 
24 // With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
25 // cannot test the thread cache.
26 //
27 // Also, the thread cache is not supported on all platforms.
28 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
29     PA_CONFIG(THREAD_CACHE_SUPPORTED)
30 
31 namespace partition_alloc {
32 
33 using BucketDistribution = PartitionRoot::BucketDistribution;
34 namespace {
35 
36 constexpr size_t kSmallSize = 33;  // Must be large enough to fit extras.
37 constexpr size_t kDefaultCountForSmallBucket =
38     ThreadCache::kSmallBucketBaseCount * ThreadCache::kDefaultMultiplier;
39 constexpr size_t kFillCountForSmallBucket =
40     kDefaultCountForSmallBucket / ThreadCache::kBatchFillRatio;
41 
42 constexpr size_t kMediumSize = 200;
43 constexpr size_t kDefaultCountForMediumBucket = kDefaultCountForSmallBucket / 2;
44 constexpr size_t kFillCountForMediumBucket =
45     kDefaultCountForMediumBucket / ThreadCache::kBatchFillRatio;
46 
47 static_assert(kMediumSize <= ThreadCache::kDefaultSizeThreshold, "");
48 
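// Tracks how much a uint64_t statistic has changed since this DeltaCounter was
// constructed (or since the last Reset()). Used throughout the tests below to
// check per-operation changes in ThreadCacheStats counters.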
49 class DeltaCounter {
50  public:
51   explicit DeltaCounter(uint64_t& value)
52       : current_value_(value), initial_value_(value) {}
53   void Reset() { initial_value_ = current_value_; }
54   uint64_t Delta() const { return current_value_ - initial_value_; }
55 
56  private:
57   uint64_t& current_value_;
58   uint64_t initial_value_;
59 };
60 
61 // Forbid extras, since they make finding out which bucket is used harder.
62 std::unique_ptr<PartitionAllocatorForTesting> CreateAllocator() {
63   PartitionOptions opts;
64   opts.aligned_alloc = PartitionOptions::kAllowed;
65 #if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
66   opts.thread_cache = PartitionOptions::kEnabled;
67 #endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
68   opts.star_scan_quarantine = PartitionOptions::kAllowed;
69   std::unique_ptr<PartitionAllocatorForTesting> allocator =
70       std::make_unique<PartitionAllocatorForTesting>(opts);
71   allocator->root()->UncapEmptySlotSpanMemoryForTesting();
72 
73   return allocator;
74 }
75 
76 }  // namespace
77 
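// Test fixture parameterized on PartitionRoot::BucketDistribution, so each test
// runs under both the neutral and the denser bucket distribution (see
// INSTANTIATE_TEST_SUITE_P below).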
78 class PartitionAllocThreadCacheTest
79     : public ::testing::TestWithParam<PartitionRoot::BucketDistribution> {
80  public:
81   PartitionAllocThreadCacheTest()
82       : allocator_(CreateAllocator()), scope_(allocator_->root()) {}
83 
84   ~PartitionAllocThreadCacheTest() override {
85     ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
86 
87     // Cleanup the global state so next test can recreate ThreadCache.
88     if (ThreadCache::IsTombstone(ThreadCache::Get())) {
89       ThreadCache::RemoveTombstoneForTesting();
90     }
91   }
92 
93  protected:
94   void SetUp() override {
95     PartitionRoot* root = allocator_->root();
96     switch (GetParam()) {
97       case BucketDistribution::kNeutral:
98         root->ResetBucketDistributionForTesting();
99         break;
100       case BucketDistribution::kDenser:
101         root->SwitchToDenserBucketDistribution();
102         break;
103     }
104 
105     ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
106         ThreadCache::kDefaultMultiplier);
107     ThreadCacheRegistry::Instance().SetPurgingConfiguration(
108         kMinPurgeInterval, kMaxPurgeInterval, kDefaultPurgeInterval,
109         kMinCachedMemoryForPurgingBytes);
110     ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
111 
112     // Make sure that enough slot spans have been touched, otherwise cache fill
113     // becomes unpredictable (because it doesn't take slow paths in the
114     // allocator), which is an issue for tests.
115     FillThreadCacheAndReturnIndex(kSmallSize, 1000);
116     FillThreadCacheAndReturnIndex(kMediumSize, 1000);
117 
118     // Allocations have been made, so a thread cache has been created.
119     auto* tcache = root->thread_cache_for_testing();
120     ASSERT_TRUE(tcache);
121 
122     ThreadCacheRegistry::Instance().ResetForTesting();
123     tcache->ResetForTesting();
124   }
125 
126   void TearDown() override {
127     auto* tcache = root()->thread_cache_for_testing();
128     ASSERT_TRUE(tcache);
129     tcache->Purge();
130 
131     ASSERT_EQ(root()->get_total_size_of_allocated_bytes(),
132               GetBucketSizeForThreadCache());
133   }
134 
135   PartitionRoot* root() { return allocator_->root(); }
136 
137   // Returns the size of the smallest bucket fitting an allocation of
138   // |sizeof(ThreadCache)| bytes.
139   size_t GetBucketSizeForThreadCache() {
140     size_t tc_bucket_index = root()->SizeToBucketIndex(
141         sizeof(ThreadCache), PartitionRoot::BucketDistribution::kNeutral);
142     auto* tc_bucket = &root()->buckets[tc_bucket_index];
143     return tc_bucket->slot_size;
144   }
145 
146   static size_t SizeToIndex(size_t size) {
147     return PartitionRoot::SizeToBucketIndex(size, GetParam());
148   }
149 
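  // Allocates |count| objects of |raw_size| bytes and frees them all, so the
  // freed slots end up in this thread's cache. Returns the bucket index that
  // |raw_size| maps to under the current bucket distribution.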
150   size_t FillThreadCacheAndReturnIndex(size_t raw_size, size_t count = 1) {
151     uint16_t bucket_index = SizeToIndex(raw_size);
152     std::vector<void*> allocated_data;
153 
154     for (size_t i = 0; i < count; ++i) {
155       allocated_data.push_back(
156           root()->Alloc(root()->AdjustSizeForExtrasSubtract(raw_size), ""));
157     }
158     for (void* ptr : allocated_data) {
159       root()->Free(ptr);
160     }
161 
162     return bucket_index;
163   }
164 
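  // Fills the thread cache with increasingly large allocations, in growing
  // batches, until it holds at least |target_cached_memory| bytes.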
165   void FillThreadCacheWithMemory(size_t target_cached_memory) {
166     for (int batch : {1, 2, 4, 8, 16}) {
167       for (size_t raw_size = root()->AdjustSizeForExtrasAdd(1);
168            raw_size <= ThreadCache::kLargeSizeThreshold; raw_size++) {
169         FillThreadCacheAndReturnIndex(raw_size, batch);
170 
171         if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
172           return;
173         }
174       }
175     }
176 
177     ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
178   }
179 
180   std::unique_ptr<PartitionAllocatorForTesting> allocator_;
181   internal::ThreadCacheProcessScopeForTesting scope_;
182 };
183 
184 INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
185                          PartitionAllocThreadCacheTest,
186                          ::testing::Values(BucketDistribution::kNeutral,
187                                            BucketDistribution::kDenser));
188 
189 TEST_P(PartitionAllocThreadCacheTest, Simple) {
190   // There is a cache.
191   auto* tcache = root()->thread_cache_for_testing();
192   EXPECT_TRUE(tcache);
193   DeltaCounter batch_fill_counter(tcache->stats_for_testing().batch_fill_count);
194 
195   void* ptr =
196       root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
197   ASSERT_TRUE(ptr);
198 
199   uint16_t index = SizeToIndex(kSmallSize);
200   EXPECT_EQ(kFillCountForSmallBucket - 1,
201             tcache->bucket_count_for_testing(index));
202 
203   root()->Free(ptr);
204   // Freeing fills the thread cache.
205   EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
206 
207   void* ptr2 =
208       root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
209   // MTE-untag, because Free() changes tag.
210   EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
211   // Allocated from the thread cache.
212   EXPECT_EQ(kFillCountForSmallBucket - 1,
213             tcache->bucket_count_for_testing(index));
214 
215   EXPECT_EQ(1u, batch_fill_counter.Delta());
216 
217   root()->Free(ptr2);
218 }
219 
220 TEST_P(PartitionAllocThreadCacheTest, InexactSizeMatch) {
221   void* ptr =
222       root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
223   ASSERT_TRUE(ptr);
224 
225   // There is a cache.
226   auto* tcache = root()->thread_cache_for_testing();
227   EXPECT_TRUE(tcache);
228 
229   uint16_t index = SizeToIndex(kSmallSize);
230   EXPECT_EQ(kFillCountForSmallBucket - 1,
231             tcache->bucket_count_for_testing(index));
232 
233   root()->Free(ptr);
234   // Freeing fills the thread cache.
235   EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
236 
237   void* ptr2 =
238       root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize + 1), "");
239   // MTE-untag, because Free() changes tag.
240   EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
241   // Allocated from the thread cache.
242   EXPECT_EQ(kFillCountForSmallBucket - 1,
243             tcache->bucket_count_for_testing(index));
244   root()->Free(ptr2);
245 }
246 
247 TEST_P(PartitionAllocThreadCacheTest, MultipleObjectsCachedPerBucket) {
248   auto* tcache = root()->thread_cache_for_testing();
249   DeltaCounter batch_fill_counter{tcache->stats_for_testing().batch_fill_count};
250   size_t bucket_index =
251       FillThreadCacheAndReturnIndex(kMediumSize, kFillCountForMediumBucket + 2);
252   EXPECT_EQ(2 * kFillCountForMediumBucket,
253             tcache->bucket_count_for_testing(bucket_index));
254   // 2 batches, since there were more than |kFillCountForMediumBucket|
255   // allocations.
256   EXPECT_EQ(2u, batch_fill_counter.Delta());
257 }
258 
259 TEST_P(PartitionAllocThreadCacheTest, ObjectsCachedCountIsLimited) {
260   size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, 1000);
261   auto* tcache = root()->thread_cache_for_testing();
262   EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
263 }
264 
265 TEST_P(PartitionAllocThreadCacheTest, Purge) {
266   size_t allocations = 10;
267   size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, allocations);
268   auto* tcache = root()->thread_cache_for_testing();
269   EXPECT_EQ(
270       (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket,
271       tcache->bucket_count_for_testing(bucket_index));
272   tcache->Purge();
273   EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
274 }
275 
276 TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
277   PartitionOptions opts;
278   opts.aligned_alloc = PartitionOptions::kAllowed;
279   opts.star_scan_quarantine = PartitionOptions::kAllowed;
280   PartitionAllocatorForTesting allocator(opts);
281 
282   size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
283   void* ptr = allocator.root()->Alloc(
284       allocator.root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
285   ASSERT_TRUE(ptr);
286 
287   auto* tcache = root()->thread_cache_for_testing();
288   EXPECT_EQ(kFillCountForSmallBucket,
289             tcache->bucket_count_for_testing(bucket_index));
290 
291   allocator.root()->Free(ptr);
292   EXPECT_EQ(kFillCountForSmallBucket,
293             tcache->bucket_count_for_testing(bucket_index));
294 }
295 
296 // Required to record hits and misses.
297 #if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
298 TEST_P(PartitionAllocThreadCacheTest, LargeAllocationsAreNotCached) {
299   auto* tcache = root()->thread_cache_for_testing();
300   DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
301   DeltaCounter alloc_miss_too_large_counter{
302       tcache->stats_for_testing().alloc_miss_too_large};
303   DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
304   DeltaCounter cache_fill_misses_counter{
305       tcache->stats_for_testing().cache_fill_misses};
306 
307   FillThreadCacheAndReturnIndex(100 * 1024);
308   tcache = root()->thread_cache_for_testing();
309   EXPECT_EQ(1u, alloc_miss_counter.Delta());
310   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
311   EXPECT_EQ(1u, cache_fill_counter.Delta());
312   EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
313 }
314 #endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
315 
316 TEST_P(PartitionAllocThreadCacheTest, DirectMappedAllocationsAreNotCached) {
317   FillThreadCacheAndReturnIndex(1024 * 1024);
318   // The line above would crash due to out of bounds access if this wasn't
319   // properly handled.
320 }
321 
322 // This tests that Realloc properly handles bookkeeping, specifically the path
323 // that reallocates in place.
324 TEST_P(PartitionAllocThreadCacheTest, DirectMappedReallocMetrics) {
325   root()->ResetBookkeepingForTesting();
326 
327   size_t expected_allocated_size = root()->get_total_size_of_allocated_bytes();
328 
329   EXPECT_EQ(expected_allocated_size,
330             root()->get_total_size_of_allocated_bytes());
331   EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
332 
333   void* ptr = root()->Alloc(
334       root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed), "");
335 
336   EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
337             root()->get_total_size_of_allocated_bytes());
338 
339   void* ptr2 = root()->Realloc(
340       ptr, root()->AdjustSizeForExtrasSubtract(9 * internal::kMaxBucketed), "");
341 
342   ASSERT_EQ(ptr, ptr2);
343   EXPECT_EQ(expected_allocated_size + 9 * internal::kMaxBucketed,
344             root()->get_total_size_of_allocated_bytes());
345 
346   ptr2 = root()->Realloc(
347       ptr, root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed),
348       "");
349 
350   ASSERT_EQ(ptr, ptr2);
351   EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
352             root()->get_total_size_of_allocated_bytes());
353 
354   root()->Free(ptr);
355 }
356 
357 namespace {
358 
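// Stand-alone variant of the fixture helper above; takes the root and bucket
// distribution explicitly so it can be called from helper threads.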
359 size_t FillThreadCacheAndReturnIndex(PartitionRoot* root,
360                                      size_t size,
361                                      BucketDistribution bucket_distribution,
362                                      size_t count = 1) {
363   uint16_t bucket_index =
364       PartitionRoot::SizeToBucketIndex(size, bucket_distribution);
365   std::vector<void*> allocated_data;
366 
367   for (size_t i = 0; i < count; ++i) {
368     allocated_data.push_back(
369         root->Alloc(root->AdjustSizeForExtrasSubtract(size), ""));
370   }
371   for (void* ptr : allocated_data) {
372     root->Free(ptr);
373   }
374 
375   return bucket_index;
376 }
377 
378 // TODO(1151236): To remove callback from partition allocator's DEPS,
379 // rewrite the tests without BindLambdaForTesting and RepeatingClosure.
380 // However, this makes it a little annoying to add more tests using their
381 // own threads. Need to support an easier way to implement tests using
382 // PlatformThreadForTesting::Create().
383 class ThreadDelegateForMultipleThreadCaches
384     : public internal::base::PlatformThreadForTesting::Delegate {
385  public:
386   ThreadDelegateForMultipleThreadCaches(ThreadCache* parent_thread_cache,
387                                         PartitionRoot* root,
388                                         BucketDistribution bucket_distribution)
389       : parent_thread_tcache_(parent_thread_cache),
390         root_(root),
391         bucket_distribution_(bucket_distribution) {}
392 
393   void ThreadMain() override {
394     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
395     FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
396     auto* tcache = root_->thread_cache_for_testing();
397     EXPECT_TRUE(tcache);
398 
399     EXPECT_NE(parent_thread_tcache_, tcache);
400   }
401 
402  private:
403   ThreadCache* parent_thread_tcache_ = nullptr;
404   PartitionRoot* root_ = nullptr;
405   PartitionRoot::BucketDistribution bucket_distribution_;
406 };
407 
408 }  // namespace
409 
410 TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCaches) {
411   FillThreadCacheAndReturnIndex(kMediumSize);
412   auto* parent_thread_tcache = root()->thread_cache_for_testing();
413   ASSERT_TRUE(parent_thread_tcache);
414 
415   ThreadDelegateForMultipleThreadCaches delegate(parent_thread_tcache, root(),
416                                                  GetParam());
417 
418   internal::base::PlatformThreadHandle thread_handle;
419   internal::base::PlatformThreadForTesting::Create(0, &delegate,
420                                                    &thread_handle);
421   internal::base::PlatformThreadForTesting::Join(thread_handle);
422 }
423 
424 namespace {
425 
426 class ThreadDelegateForThreadCacheReclaimedWhenThreadExits
427     : public internal::base::PlatformThreadForTesting::Delegate {
428  public:
429   ThreadDelegateForThreadCacheReclaimedWhenThreadExits(PartitionRoot* root,
430                                                        void*& other_thread_ptr)
431       : root_(root), other_thread_ptr_(other_thread_ptr) {}
432 
433   void ThreadMain() override {
434     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
435     other_thread_ptr_ =
436         root_->Alloc(root_->AdjustSizeForExtrasSubtract(kMediumSize), "");
437     root_->Free(other_thread_ptr_);
438     // |other_thread_ptr| is now in the thread cache.
439   }
440 
441  private:
442   PartitionRoot* root_ = nullptr;
443   void*& other_thread_ptr_;
444 };
445 
446 }  // namespace
447 
448 TEST_P(PartitionAllocThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
449   // Make sure that there is always at least one object allocated in the test
450   // bucket, so that the PartitionPage is not reclaimed.
451   //
452   // Allocate enough objects to force a cache fill at the next allocation.
453   std::vector<void*> tmp;
454   for (size_t i = 0; i < kDefaultCountForMediumBucket / 4; i++) {
455     tmp.push_back(
456         root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), ""));
457   }
458 
459   void* other_thread_ptr = nullptr;
460   ThreadDelegateForThreadCacheReclaimedWhenThreadExits delegate(
461       root(), other_thread_ptr);
462 
463   internal::base::PlatformThreadHandle thread_handle;
464   internal::base::PlatformThreadForTesting::Create(0, &delegate,
465                                                    &thread_handle);
466   internal::base::PlatformThreadForTesting::Join(thread_handle);
467 
468   void* this_thread_ptr =
469       root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
470   // |other_thread_ptr| was returned to the central allocator, and is returned
471   // here, as it comes from the freelist.
472   EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
473   root()->Free(other_thread_ptr);
474 
475   for (void* ptr : tmp) {
476     root()->Free(ptr);
477   }
478 }
479 
480 namespace {
481 
482 class ThreadDelegateForThreadCacheRegistry
483     : public internal::base::PlatformThreadForTesting::Delegate {
484  public:
485   ThreadDelegateForThreadCacheRegistry(ThreadCache* parent_thread_cache,
486                                        PartitionRoot* root,
487                                        BucketDistribution bucket_distribution)
488       : parent_thread_tcache_(parent_thread_cache),
489         root_(root),
490         bucket_distribution_(bucket_distribution) {}
491 
492   void ThreadMain() override {
493     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
494     FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
495     auto* tcache = root_->thread_cache_for_testing();
496     EXPECT_TRUE(tcache);
497 
498     internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
499     EXPECT_EQ(tcache->prev_for_testing(), nullptr);
500     EXPECT_EQ(tcache->next_for_testing(), parent_thread_tcache_);
501   }
502 
503  private:
504   ThreadCache* parent_thread_tcache_ = nullptr;
505   PartitionRoot* root_ = nullptr;
506   BucketDistribution bucket_distribution_;
507 };
508 
509 }  // namespace
510 
511 TEST_P(PartitionAllocThreadCacheTest, ThreadCacheRegistry) {
512   auto* parent_thread_tcache = root()->thread_cache_for_testing();
513   ASSERT_TRUE(parent_thread_tcache);
514 
515 #if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) ||   \
516       BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
517     BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
518   // iOS and MacOS 15 create worker threads internally (start_wqthread).
519   // So thread caches are created for the worker threads, because the threads
520   // allocate memory for initialization (_dispatch_calloc is invoked).
521   // We cannot assume that there is only 1 thread cache here.
522 
523   // Regarding Linux, ChromeOS and Android, some other tests may create
524   // non-joinable threads. E.g. FilePathWatcherTest will create
525   // non-joinable thread at InotifyReader::StartThread(). The thread will
526   // still be running after the tests are finished, and will break
527   // the assumption that only the main thread exists here.
528   {
529     internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
530     EXPECT_EQ(parent_thread_tcache->prev_for_testing(), nullptr);
531     EXPECT_EQ(parent_thread_tcache->next_for_testing(), nullptr);
532   }
533 #endif
534 
535   ThreadDelegateForThreadCacheRegistry delegate(parent_thread_tcache, root(),
536                                                 GetParam());
537 
538   internal::base::PlatformThreadHandle thread_handle;
539   internal::base::PlatformThreadForTesting::Create(0, &delegate,
540                                                    &thread_handle);
541   internal::base::PlatformThreadForTesting::Join(thread_handle);
542 
543 #if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) ||   \
544       BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
545     BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
546   internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
547   EXPECT_EQ(parent_thread_tcache->prev_for_testing(), nullptr);
548   EXPECT_EQ(parent_thread_tcache->next_for_testing(), nullptr);
549 #endif
550 }
551 
552 #if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
553 TEST_P(PartitionAllocThreadCacheTest, RecordStats) {
554   auto* tcache = root()->thread_cache_for_testing();
555   DeltaCounter alloc_counter{tcache->stats_for_testing().alloc_count};
556   DeltaCounter alloc_hits_counter{tcache->stats_for_testing().alloc_hits};
557   DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
558 
559   DeltaCounter alloc_miss_empty_counter{
560       tcache->stats_for_testing().alloc_miss_empty};
561 
562   DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
563   DeltaCounter cache_fill_hits_counter{
564       tcache->stats_for_testing().cache_fill_hits};
565   DeltaCounter cache_fill_misses_counter{
566       tcache->stats_for_testing().cache_fill_misses};
567 
568   // Cache has been purged, first allocation is a miss.
569   void* data =
570       root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
571   EXPECT_EQ(1u, alloc_counter.Delta());
572   EXPECT_EQ(1u, alloc_miss_counter.Delta());
573   EXPECT_EQ(0u, alloc_hits_counter.Delta());
574 
575   // Cache fill worked.
576   root()->Free(data);
577   EXPECT_EQ(1u, cache_fill_counter.Delta());
578   EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
579   EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
580 
581   tcache->Purge();
582   cache_fill_counter.Reset();
583   // Buckets are never full, fill always succeeds.
584   size_t allocations = 10;
585   size_t bucket_index = FillThreadCacheAndReturnIndex(
586       kMediumSize, kDefaultCountForMediumBucket + allocations);
587   EXPECT_EQ(kDefaultCountForMediumBucket + allocations,
588             cache_fill_counter.Delta());
589   EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
590 
591   // Memory footprint.
592   ThreadCacheStats stats;
593   ThreadCacheRegistry::Instance().DumpStats(true, &stats);
594   // Bucket was cleared (set to kDefaultCountForMediumBucket / 2) after going
595   // above the limit (-1), then refilled by batches (1 + floor(allocations /
596   // kFillCountForMediumBucket) times).
597   size_t expected_count =
598       kDefaultCountForMediumBucket / 2 - 1 +
599       (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket;
600   EXPECT_EQ(root()->buckets[bucket_index].slot_size * expected_count,
601             stats.bucket_total_memory);
602   EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
603 }
604 
605 namespace {
606 
607 class ThreadDelegateForMultipleThreadCachesAccounting
608     : public internal::base::PlatformThreadForTesting::Delegate {
609  public:
610   ThreadDelegateForMultipleThreadCachesAccounting(
611       PartitionRoot* root,
612       const ThreadCacheStats& wqthread_stats,
613       int alloc_count,
614       BucketDistribution bucket_distribution)
615       : root_(root),
616         bucket_distribution_(bucket_distribution),
617         wqthread_stats_(wqthread_stats),
618         alloc_count_(alloc_count) {}
619 
620   void ThreadMain() override {
621     EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
622     size_t bucket_index =
623         FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
624 
625     ThreadCacheStats stats;
626     ThreadCacheRegistry::Instance().DumpStats(false, &stats);
627     // 2* for this thread and the parent one.
628     EXPECT_EQ(
629         2 * root_->buckets[bucket_index].slot_size * kFillCountForMediumBucket,
630         stats.bucket_total_memory - wqthread_stats_.bucket_total_memory);
631     EXPECT_EQ(2 * sizeof(ThreadCache),
632               stats.metadata_overhead - wqthread_stats_.metadata_overhead);
633 
634     ThreadCacheStats this_thread_cache_stats{};
635     root_->thread_cache_for_testing()->AccumulateStats(
636         &this_thread_cache_stats);
637     EXPECT_EQ(alloc_count_ + this_thread_cache_stats.alloc_count,
638               stats.alloc_count - wqthread_stats_.alloc_count);
639   }
640 
641  private:
642   PartitionRoot* root_ = nullptr;
643   BucketDistribution bucket_distribution_;
644   const ThreadCacheStats wqthread_stats_;
645   const int alloc_count_;
646 };
647 
648 }  // namespace
649 
650 TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCachesAccounting) {
651   ThreadCacheStats wqthread_stats{0};
652 #if (BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS) || \
653      BUILDFLAG(IS_LINUX)) &&                                                   \
654     BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
655   {
656     // iOS and MacOS 15 create worker threads internally (start_wqthread).
657     // So thread caches are created for the worker threads, because the threads
658     // allocate memory for initialization (_dispatch_calloc is invoked).
659     // We need to count worker threads created by iOS and Mac system.
660 
661     // Regarding Linux, ChromeOS and Android, some other tests may create
662     // non-joinable threads. E.g. FilePathWatcherTest will create
663     // non-joinable thread at InotifyReader::StartThread(). The thread will
664     // still be running after the tests are finished. We need to count
665     // the joinable threads here.
666     ThreadCacheRegistry::Instance().DumpStats(false, &wqthread_stats);
667 
668     // Remove this thread's thread cache stats from wqthread_stats.
669     ThreadCacheStats this_stats;
670     ThreadCacheRegistry::Instance().DumpStats(true, &this_stats);
671 
672     wqthread_stats.alloc_count -= this_stats.alloc_count;
673     wqthread_stats.metadata_overhead -= this_stats.metadata_overhead;
674     wqthread_stats.bucket_total_memory -= this_stats.bucket_total_memory;
675   }
676 #endif
677   FillThreadCacheAndReturnIndex(kMediumSize);
678   uint64_t alloc_count =
679       root()->thread_cache_for_testing()->stats_for_testing().alloc_count;
680 
681   ThreadDelegateForMultipleThreadCachesAccounting delegate(
682       root(), wqthread_stats, alloc_count, GetParam());
683 
684   internal::base::PlatformThreadHandle thread_handle;
685   internal::base::PlatformThreadForTesting::Create(0, &delegate,
686                                                    &thread_handle);
687   internal::base::PlatformThreadForTesting::Join(thread_handle);
688 }
689 
690 #endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
691 
692 // TODO(https://crbug.com/1287799): Flaky on IOS.
693 #if BUILDFLAG(IS_IOS)
694 #define MAYBE_PurgeAll DISABLED_PurgeAll
695 #else
696 #define MAYBE_PurgeAll PurgeAll
697 #endif
698 
699 namespace {
700 
701 class ThreadDelegateForPurgeAll
702     : public internal::base::PlatformThreadForTesting::Delegate {
703  public:
704   ThreadDelegateForPurgeAll(PartitionRoot* root,
705                             ThreadCache*& other_thread_tcache,
706                             std::atomic<bool>& other_thread_started,
707                             std::atomic<bool>& purge_called,
708                             int bucket_index,
709                             BucketDistribution bucket_distribution)
710       : root_(root),
711         other_thread_tcache_(other_thread_tcache),
712         other_thread_started_(other_thread_started),
713         purge_called_(purge_called),
714         bucket_index_(bucket_index),
715         bucket_distribution_(bucket_distribution) {}
716 
717   void ThreadMain() override PA_NO_THREAD_SAFETY_ANALYSIS {
718     FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
719     other_thread_tcache_ = root_->thread_cache_for_testing();
720 
721     other_thread_started_.store(true, std::memory_order_release);
722     while (!purge_called_.load(std::memory_order_acquire)) {
723     }
724 
725     // Purge() was not triggered from the other thread.
726     EXPECT_EQ(kFillCountForSmallBucket,
727               other_thread_tcache_->bucket_count_for_testing(bucket_index_));
728     // Allocations do not trigger Purge().
729     void* data =
730         root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
731     EXPECT_EQ(kFillCountForSmallBucket - 1,
732               other_thread_tcache_->bucket_count_for_testing(bucket_index_));
733     // But deallocations do.
734     root_->Free(data);
735     EXPECT_EQ(0u,
736               other_thread_tcache_->bucket_count_for_testing(bucket_index_));
737   }
738 
739  private:
740   PartitionRoot* root_ = nullptr;
741   ThreadCache*& other_thread_tcache_;
742   std::atomic<bool>& other_thread_started_;
743   std::atomic<bool>& purge_called_;
744   const int bucket_index_;
745   BucketDistribution bucket_distribution_;
746 };
747 
748 }  // namespace
749 
750 TEST_P(PartitionAllocThreadCacheTest, MAYBE_PurgeAll)
751 PA_NO_THREAD_SAFETY_ANALYSIS {
752   std::atomic<bool> other_thread_started{false};
753   std::atomic<bool> purge_called{false};
754 
755   size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
756   ThreadCache* this_thread_tcache = root()->thread_cache_for_testing();
757   ThreadCache* other_thread_tcache = nullptr;
758 
759   ThreadDelegateForPurgeAll delegate(root(), other_thread_tcache,
760                                      other_thread_started, purge_called,
761                                      bucket_index, GetParam());
762   internal::base::PlatformThreadHandle thread_handle;
763   internal::base::PlatformThreadForTesting::Create(0, &delegate,
764                                                    &thread_handle);
765 
766   while (!other_thread_started.load(std::memory_order_acquire)) {
767   }
768 
769   EXPECT_EQ(kFillCountForSmallBucket,
770             this_thread_tcache->bucket_count_for_testing(bucket_index));
771   EXPECT_EQ(kFillCountForSmallBucket,
772             other_thread_tcache->bucket_count_for_testing(bucket_index));
773 
774   ThreadCacheRegistry::Instance().PurgeAll();
775   // This thread is synchronously purged.
776   EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
777   // Not the other one.
778   EXPECT_EQ(kFillCountForSmallBucket,
779             other_thread_tcache->bucket_count_for_testing(bucket_index));
780 
781   purge_called.store(true, std::memory_order_release);
782   internal::base::PlatformThreadForTesting::Join(thread_handle);
783 }
784 
785 TEST_P(PartitionAllocThreadCacheTest, PeriodicPurge) {
786   auto& registry = ThreadCacheRegistry::Instance();
787   auto NextInterval = [&registry]() {
788     return internal::base::Microseconds(
789         registry.GetPeriodicPurgeNextIntervalInMicroseconds());
790   };
791 
792   EXPECT_EQ(NextInterval(), registry.default_purge_interval());
793 
794   // Small amount of memory, the period gets longer.
795   auto* tcache = ThreadCache::Get();
796   ASSERT_LT(tcache->CachedMemory(),
797             registry.min_cached_memory_for_purging_bytes());
798   registry.RunPeriodicPurge();
799   EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
800   registry.RunPeriodicPurge();
801   EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
802 
803   // Check that the purge interval is clamped at the maximum value.
804   while (NextInterval() < registry.max_purge_interval()) {
805     registry.RunPeriodicPurge();
806   }
807   registry.RunPeriodicPurge();
808 
809   // Not enough memory to decrease the interval.
810   FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() + 1);
811   registry.RunPeriodicPurge();
812   EXPECT_EQ(NextInterval(), registry.max_purge_interval());
813 
814   FillThreadCacheWithMemory(2 * registry.min_cached_memory_for_purging_bytes() +
815                             1);
816   registry.RunPeriodicPurge();
817   EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
818 
819   // Enough memory, interval doesn't change.
820   FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes());
821   registry.RunPeriodicPurge();
822   EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
823 
824   // No cached memory, increase the interval.
825   registry.RunPeriodicPurge();
826   EXPECT_EQ(NextInterval(), registry.max_purge_interval());
827 
828   // Cannot test the very large size with only one thread; this is tested below
829   // in the multiple threads test.
830 }
831 
832 namespace {
833 
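// Stand-alone variant of the fixture helper; keeps filling the calling thread's
// cache through |root| until at least |target_cached_memory| bytes are cached.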
834 void FillThreadCacheWithMemory(PartitionRoot* root,
835                                size_t target_cached_memory,
836                                BucketDistribution bucket_distribution) {
837   for (int batch : {1, 2, 4, 8, 16}) {
838     for (size_t allocation_size = 1;
839          allocation_size <= ThreadCache::kLargeSizeThreshold;
840          allocation_size++) {
841       FillThreadCacheAndReturnIndex(
842           root, root->AdjustSizeForExtrasAdd(allocation_size),
843           bucket_distribution, batch);
844 
845       if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
846         return;
847       }
848     }
849   }
850 
851   ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
852 }
853 
854 class ThreadDelegateForPeriodicPurgeSumsOverAllThreads
855     : public internal::base::PlatformThreadForTesting::Delegate {
856  public:
857   ThreadDelegateForPeriodicPurgeSumsOverAllThreads(
858       PartitionRoot* root,
859       std::atomic<int>& allocations_done,
860       std::atomic<bool>& can_finish,
861       BucketDistribution bucket_distribution)
862       : root_(root),
863         allocations_done_(allocations_done),
864         can_finish_(can_finish),
865         bucket_distribution_(bucket_distribution) {}
866 
867   void ThreadMain() override {
868     FillThreadCacheWithMemory(root_,
869                               5 * ThreadCacheRegistry::Instance()
870                                       .min_cached_memory_for_purging_bytes(),
871                               bucket_distribution_);
872     allocations_done_.fetch_add(1, std::memory_order_release);
873 
874     // This thread needs to be alive when the next periodic purge task runs.
875     while (!can_finish_.load(std::memory_order_acquire)) {
876     }
877   }
878 
879  private:
880   PartitionRoot* root_ = nullptr;
881   std::atomic<int>& allocations_done_;
882   std::atomic<bool>& can_finish_;
883   BucketDistribution bucket_distribution_;
884 };
885 
886 }  // namespace
887 
888 // Disabled due to flakiness: crbug.com/1220371
889 TEST_P(PartitionAllocThreadCacheTest,
890        DISABLED_PeriodicPurgeSumsOverAllThreads) {
891   auto& registry = ThreadCacheRegistry::Instance();
892   auto NextInterval = [&registry]() {
893     return internal::base::Microseconds(
894         registry.GetPeriodicPurgeNextIntervalInMicroseconds());
895   };
896   EXPECT_EQ(NextInterval(), registry.default_purge_interval());
897 
898   // Small amount of memory, the period gets longer.
899   auto* tcache = ThreadCache::Get();
900   ASSERT_LT(tcache->CachedMemory(),
901             registry.min_cached_memory_for_purging_bytes());
902   registry.RunPeriodicPurge();
903   EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
904   registry.RunPeriodicPurge();
905   EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
906 
907   // Check that the purge interval is clamped at the maximum value.
908   while (NextInterval() < registry.max_purge_interval()) {
909     registry.RunPeriodicPurge();
910   }
911   registry.RunPeriodicPurge();
912 
913   // Not enough memory on this thread to decrease the interval.
914   FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() / 2);
915   registry.RunPeriodicPurge();
916   EXPECT_EQ(NextInterval(), registry.max_purge_interval());
917 
918   std::atomic<int> allocations_done{0};
919   std::atomic<bool> can_finish{false};
920   ThreadDelegateForPeriodicPurgeSumsOverAllThreads delegate(
921       root(), allocations_done, can_finish, GetParam());
922 
923   internal::base::PlatformThreadHandle thread_handle;
924   internal::base::PlatformThreadForTesting::Create(0, &delegate,
925                                                    &thread_handle);
926   internal::base::PlatformThreadHandle thread_handle_2;
927   internal::base::PlatformThreadForTesting::Create(0, &delegate,
928                                                    &thread_handle_2);
929 
930   while (allocations_done.load(std::memory_order_acquire) != 2) {
931     internal::base::PlatformThreadForTesting::YieldCurrentThread();
932   }
933 
934   // Many allocations on the other thread.
935   registry.RunPeriodicPurge();
936   EXPECT_EQ(NextInterval(), registry.default_purge_interval());
937 
938   can_finish.store(true, std::memory_order_release);
939   internal::base::PlatformThreadForTesting::Join(thread_handle);
940   internal::base::PlatformThreadForTesting::Join(thread_handle_2);
941 }
942 
943 // TODO(https://crbug.com/1287799): Flaky on IOS.
944 #if BUILDFLAG(IS_IOS)
945 #define MAYBE_DynamicCountPerBucket DISABLED_DynamicCountPerBucket
946 #else
947 #define MAYBE_DynamicCountPerBucket DynamicCountPerBucket
948 #endif
949 TEST_P(PartitionAllocThreadCacheTest, MAYBE_DynamicCountPerBucket) {
950   auto* tcache = root()->thread_cache_for_testing();
951   size_t bucket_index =
952       FillThreadCacheAndReturnIndex(kMediumSize, kDefaultCountForMediumBucket);
953 
954   EXPECT_EQ(kDefaultCountForMediumBucket,
955             tcache->bucket_for_testing(bucket_index).count);
956 
957   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
958       ThreadCache::kDefaultMultiplier / 2);
959   // No immediate batch deallocation.
960   EXPECT_EQ(kDefaultCountForMediumBucket,
961             tcache->bucket_for_testing(bucket_index).count);
962   void* data =
963       root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
964   // Not triggered by allocations.
965   EXPECT_EQ(kDefaultCountForMediumBucket - 1,
966             tcache->bucket_for_testing(bucket_index).count);
967 
968   // Free() triggers the purge within limits.
969   root()->Free(data);
970   EXPECT_LE(tcache->bucket_for_testing(bucket_index).count,
971             kDefaultCountForMediumBucket / 2);
972 
973   // The count won't go above the lowered limit anymore.
974   FillThreadCacheAndReturnIndex(kMediumSize, 1000);
975   EXPECT_LE(tcache->bucket_for_testing(bucket_index).count,
976             kDefaultCountForMediumBucket / 2);
977 
978   // Limit can be raised.
979   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
980       ThreadCache::kDefaultMultiplier * 2);
981   FillThreadCacheAndReturnIndex(kMediumSize, 1000);
982   EXPECT_GT(tcache->bucket_for_testing(bucket_index).count,
983             kDefaultCountForMediumBucket / 2);
984 }
985 
986 TEST_P(PartitionAllocThreadCacheTest, DynamicCountPerBucketClamping) {
987   auto* tcache = root()->thread_cache_for_testing();
988 
989   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
990       ThreadCache::kDefaultMultiplier / 1000.);
991   for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
992     // Invalid bucket.
993     if (!tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed)) {
994       EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
995       continue;
996     }
997     EXPECT_GE(
998         tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed),
999         1u);
1000   }
1001 
1002   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1003       ThreadCache::kDefaultMultiplier * 1000.);
1004   for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
1005     // Invalid bucket.
1006     if (!tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed)) {
1007       EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
1008       continue;
1009     }
1010     EXPECT_LT(
1011         tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed),
1012         0xff);
1013   }
1014 }
1015 
1016 // TODO(https://crbug.com/1287799): Flaky on IOS.
1017 #if BUILDFLAG(IS_IOS)
1018 #define MAYBE_DynamicCountPerBucketMultipleThreads \
1019   DISABLED_DynamicCountPerBucketMultipleThreads
1020 #else
1021 #define MAYBE_DynamicCountPerBucketMultipleThreads \
1022   DynamicCountPerBucketMultipleThreads
1023 #endif
1024 
1025 namespace {
1026 
1027 class ThreadDelegateForDynamicCountPerBucketMultipleThreads
1028     : public internal::base::PlatformThreadForTesting::Delegate {
1029  public:
1030   ThreadDelegateForDynamicCountPerBucketMultipleThreads(
1031       PartitionRoot* root,
1032       std::atomic<bool>& other_thread_started,
1033       std::atomic<bool>& threshold_changed,
1034       int bucket_index,
1035       BucketDistribution bucket_distribution)
1036       : root_(root),
1037         other_thread_started_(other_thread_started),
1038         threshold_changed_(threshold_changed),
1039         bucket_index_(bucket_index),
1040         bucket_distribution_(bucket_distribution) {}
1041 
1042   void ThreadMain() override {
1043     FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_,
1044                                   kDefaultCountForSmallBucket + 10);
1045     auto* this_thread_tcache = root_->thread_cache_for_testing();
1046     // More than the default since the multiplier has changed.
1047     EXPECT_GT(this_thread_tcache->bucket_count_for_testing(bucket_index_),
1048               kDefaultCountForSmallBucket + 10);
1049 
1050     other_thread_started_.store(true, std::memory_order_release);
1051     while (!threshold_changed_.load(std::memory_order_acquire)) {
1052     }
1053 
1054     void* data =
1055         root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
1056     // Deallocations trigger limit enforcement.
1057     root_->Free(data);
1058     // Since the bucket is too full, it gets halved by batched deallocation.
1059     EXPECT_EQ(static_cast<uint8_t>(ThreadCache::kSmallBucketBaseCount / 2),
1060               this_thread_tcache->bucket_count_for_testing(bucket_index_));
1061   }
1062 
1063  private:
1064   PartitionRoot* root_ = nullptr;
1065   std::atomic<bool>& other_thread_started_;
1066   std::atomic<bool>& threshold_changed_;
1067   const int bucket_index_;
1068   PartitionRoot::BucketDistribution bucket_distribution_;
1069 };
1070 
1071 }  // namespace
1072 
1073 TEST_P(PartitionAllocThreadCacheTest,
1074        MAYBE_DynamicCountPerBucketMultipleThreads) {
1075   std::atomic<bool> other_thread_started{false};
1076   std::atomic<bool> threshold_changed{false};
1077 
1078   auto* tcache = root()->thread_cache_for_testing();
1079   size_t bucket_index =
1080       FillThreadCacheAndReturnIndex(kSmallSize, kDefaultCountForSmallBucket);
1081   EXPECT_EQ(kDefaultCountForSmallBucket,
1082             tcache->bucket_for_testing(bucket_index).count);
1083 
1084   // Change the ratio before starting the threads, checking that it will be
1085   // applied to newly-created threads.
1086   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1087       ThreadCache::kDefaultMultiplier + 1);
1088 
1089   ThreadDelegateForDynamicCountPerBucketMultipleThreads delegate(
1090       root(), other_thread_started, threshold_changed, bucket_index,
1091       GetParam());
1092 
1093   internal::base::PlatformThreadHandle thread_handle;
1094   internal::base::PlatformThreadForTesting::Create(0, &delegate,
1095                                                    &thread_handle);
1096 
1097   while (!other_thread_started.load(std::memory_order_acquire)) {
1098   }
1099 
1100   ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(1.);
1101   threshold_changed.store(true, std::memory_order_release);
1102 
1103   internal::base::PlatformThreadForTesting::Join(thread_handle);
1104 }
1105 
1106 TEST_P(PartitionAllocThreadCacheTest, DynamicSizeThreshold) {
1107   auto* tcache = root()->thread_cache_for_testing();
1108   DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
1109   DeltaCounter alloc_miss_too_large_counter{
1110       tcache->stats_for_testing().alloc_miss_too_large};
1111   DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
1112   DeltaCounter cache_fill_misses_counter{
1113       tcache->stats_for_testing().cache_fill_misses};
1114 
1115   // Default threshold at first.
1116   ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1117   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold);
1118 
1119   EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
1120   EXPECT_EQ(1u, cache_fill_counter.Delta());
1121 
1122   // Too large to be cached.
1123   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1124   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1125 
1126   // Increase.
1127   ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
1128   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1129   // No new miss.
1130   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1131 
1132   // Lower.
1133   ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1134   FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1135   EXPECT_EQ(2u, alloc_miss_too_large_counter.Delta());
1136 
1137   // Value is clamped.
1138   size_t too_large = 1024 * 1024;
1139   ThreadCache::SetLargestCachedSize(too_large);
1140   FillThreadCacheAndReturnIndex(too_large);
1141   EXPECT_EQ(3u, alloc_miss_too_large_counter.Delta());
1142 }
1143 
1144 // Disabled due to flakiness: crbug.com/1287811
1145 TEST_P(PartitionAllocThreadCacheTest, DISABLED_DynamicSizeThresholdPurge) {
1146   auto* tcache = root()->thread_cache_for_testing();
1147   DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
1148   DeltaCounter alloc_miss_too_large_counter{
1149       tcache->stats_for_testing().alloc_miss_too_large};
1150   DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
1151   DeltaCounter cache_fill_misses_counter{
1152       tcache->stats_for_testing().cache_fill_misses};
1153 
1154   // Cache large allocations.
1155   size_t large_allocation_size = ThreadCache::kLargeSizeThreshold;
1156   ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
1157   size_t index = FillThreadCacheAndReturnIndex(large_allocation_size);
1158   EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
1159 
1160   // Lower.
1161   ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1162   FillThreadCacheAndReturnIndex(large_allocation_size);
1163   EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1164 
1165   // There is memory trapped in the cache bucket.
1166   EXPECT_GT(tcache->bucket_for_testing(index).count, 0u);
1167 
1168   // Which is reclaimed by Purge().
1169   tcache->Purge();
1170   EXPECT_EQ(0u, tcache->bucket_for_testing(index).count);
1171 }
1172 
1173 TEST_P(PartitionAllocThreadCacheTest, ClearFromTail) {
1174   auto count_items = [](ThreadCache* tcache, size_t index) {
1175     uint8_t count = 0;
1176     auto* head = tcache->bucket_for_testing(index).freelist_head;
1177     while (head) {
1178       head = head->GetNextForThreadCache<true>(
1179           tcache->bucket_for_testing(index).slot_size);
1180       count++;
1181     }
1182     return count;
1183   };
1184 
1185   auto* tcache = root()->thread_cache_for_testing();
1186   size_t index = FillThreadCacheAndReturnIndex(kSmallSize, 10);
1187   ASSERT_GE(count_items(tcache, index), 10);
1188   void* head = tcache->bucket_for_testing(index).freelist_head;
1189 
1190   for (size_t limit : {8, 3, 1}) {
1191     tcache->ClearBucketForTesting(tcache->bucket_for_testing(index), limit);
1192     EXPECT_EQ(head, static_cast<void*>(
1193                         tcache->bucket_for_testing(index).freelist_head));
1194     EXPECT_EQ(count_items(tcache, index), limit);
1195   }
1196   tcache->ClearBucketForTesting(tcache->bucket_for_testing(index), 0);
1197   EXPECT_EQ(nullptr, static_cast<void*>(
1198                          tcache->bucket_for_testing(index).freelist_head));
1199 }
1200 
1201 // TODO(https://crbug.com/1287799): Flaky on IOS.
1202 #if BUILDFLAG(IS_IOS)
1203 #define MAYBE_Bookkeeping DISABLED_Bookkeeping
1204 #else
1205 #define MAYBE_Bookkeeping Bookkeeping
1206 #endif
1207 TEST_P(PartitionAllocThreadCacheTest, MAYBE_Bookkeeping) {
1208   void* arr[kFillCountForMediumBucket] = {};
1209   auto* tcache = root()->thread_cache_for_testing();
1210 
1211   root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
1212                       PurgeFlags::kDiscardUnusedSystemPages);
1213   root()->ResetBookkeepingForTesting();
1214 
1215   // The ThreadCache is allocated before we change buckets, so its size is
1216   // always based on the neutral distribution.
1217   size_t tc_bucket_index = root()->SizeToBucketIndex(
1218       sizeof(ThreadCache), PartitionRoot::BucketDistribution::kNeutral);
1219   auto* tc_bucket = &root()->buckets[tc_bucket_index];
1220   size_t expected_allocated_size =
1221       tc_bucket->slot_size;  // For the ThreadCache itself.
  size_t expected_committed_size = kUseLazyCommit
                                       ? internal::SystemPageSize()
                                       : tc_bucket->get_bytes_per_span();

  EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
  EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
  EXPECT_EQ(expected_allocated_size,
            root()->get_total_size_of_allocated_bytes());
  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());

  void* ptr =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");

  auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
  size_t medium_alloc_size = medium_bucket->slot_size;
  expected_allocated_size += medium_alloc_size;
  expected_committed_size += kUseLazyCommit
                                 ? internal::SystemPageSize()
                                 : medium_bucket->get_bytes_per_span();

  EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
  EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
  EXPECT_EQ(expected_allocated_size,
            root()->get_total_size_of_allocated_bytes());
  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());

  expected_allocated_size += kFillCountForMediumBucket * medium_alloc_size;

  // These allocations all come from the thread-cache.
  for (size_t i = 0; i < kFillCountForMediumBucket; i++) {
    arr[i] =
        root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
    EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
    EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
    EXPECT_EQ(expected_allocated_size,
              root()->get_total_size_of_allocated_bytes());
    EXPECT_EQ(expected_allocated_size,
              root()->get_max_size_of_allocated_bytes());
    EXPECT_EQ((kFillCountForMediumBucket - 1 - i) * medium_alloc_size,
              tcache->CachedMemory());
  }

  EXPECT_EQ(0U, tcache->CachedMemory());

  root()->Free(ptr);

  for (auto*& el : arr) {
    root()->Free(el);
  }
  EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
            expected_allocated_size);
  tcache->Purge();
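  // Purging drops the cached slots; the only allocation left in the root is
  // the ThreadCache object itself.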
  EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
            GetBucketSizeForThreadCache());
}

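// TryPurge() on a thread cache that has served no allocations should be a
// no-op and must not crash.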
TEST_P(PartitionAllocThreadCacheTest, TryPurgeNoAllocs) {
  auto* tcache = root()->thread_cache_for_testing();
  tcache->TryPurge();
}

TEST_P(PartitionAllocThreadCacheTest, TryPurgeMultipleCorrupted) {
  auto* tcache = root()->thread_cache_for_testing();

  void* ptr =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");

  auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);

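  // Corrupt the "next" pointer of an entry on the slot span's freelist, then
  // check that TryPurge() copes with the corruption without crashing. The
  // pointer is reset to nullptr afterwards so the freelist is valid again and
  // the allocation can be freed normally.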
  auto* curr = medium_bucket->active_slot_spans_head->get_freelist_head();
  curr = curr->GetNextForThreadCache<true>(kMediumSize);
  curr->CorruptNextForTesting(0x12345678);
  tcache->TryPurge();
  curr->SetNext(nullptr);
  root()->Free(ptr);
}

TEST(AlternateBucketDistributionTest, SizeToIndex) {
  using internal::BucketIndexLookup;

  // The first 12 buckets are the same as in the default bucket distribution.
  for (size_t i = 1 << 0; i < 1 << 8; i <<= 1) {
    for (size_t offset = 0; offset < 4; offset++) {
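      // n walks the four bucket sizes of each order: i, 1.25*i, 1.5*i and
      // 1.75*i.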
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForNeutralBuckets(n));
    }
  }

  // The alternate bucket distribution is different in the middle values.
  //
  // For each order, the top two buckets are removed compared with the default
  // distribution. Values that would be allocated in those two buckets are
  // instead allocated in the next power of two bucket.
  //
  // The first two buckets (each power of two and the next bucket up) remain
  // the same between the two bucket distributions.
  size_t expected_index = BucketIndexLookup::GetIndex(1 << 8);
  for (size_t i = 1 << 8; i < internal::kHighThresholdForAlternateDistribution;
       i <<= 1) {
    // The first two buckets in the order should match up to the normal bucket
    // distribution.
    for (size_t offset = 0; offset < 2; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForNeutralBuckets(n));
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
      expected_index += 2;
    }
    // The last two buckets in the order are "rounded up" to the same bucket
    // as the next power of two.
    expected_index += 4;
    for (size_t offset = 2; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      // These two are rounded up in the alternate distribution, so we expect
      // the bucket index to be larger than the bucket index for the same
      // allocation under the default distribution.
      EXPECT_GT(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForNeutralBuckets(n));
      // We expect both allocations in this loop to be rounded up to the next
      // power of two bucket.
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
    }
  }

  // The rest of the buckets all match up exactly with the existing
  // bucket distribution.
  for (size_t i = internal::kHighThresholdForAlternateDistribution;
       i < internal::kMaxBucketed; i <<= 1) {
    for (size_t offset = 0; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForNeutralBuckets(n));
    }
  }
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecording) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  constexpr size_t kBucketedNotCached = 1 << 12;
  constexpr size_t kDirectMapped = 4 * (1 << 20);
  // Not a "nice" size on purpose, to check that the raw size accounting works.
  const size_t kSingleSlot = internal::PartitionPageSize() + 1;

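  // The per-thread stats record usable sizes rather than requested sizes, so
  // accumulate GetUsableSize() for each allocation.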
  size_t expected_total_size = 0;
  void* ptr =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
  ASSERT_TRUE(ptr);
  expected_total_size += root()->GetUsableSize(ptr);
  void* ptr2 = root()->Alloc(
      root()->AdjustSizeForExtrasSubtract(kBucketedNotCached), "");
  ASSERT_TRUE(ptr2);
  expected_total_size += root()->GetUsableSize(ptr2);
  void* ptr3 =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kDirectMapped), "");
  ASSERT_TRUE(ptr3);
  expected_total_size += root()->GetUsableSize(ptr3);
  void* ptr4 =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSingleSlot), "");
  ASSERT_TRUE(ptr4);
  expected_total_size += root()->GetUsableSize(ptr4);

  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);

  root()->Free(ptr);
  root()->Free(ptr2);
  root()->Free(ptr3);
  root()->Free(ptr4);

  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);
  EXPECT_EQ(4u, tcache->thread_alloc_stats().dealloc_count);
  EXPECT_EQ(expected_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);

  auto stats = internal::GetAllocStatsForCurrentThread();
  EXPECT_EQ(4u, stats.alloc_count);
  EXPECT_EQ(expected_total_size, stats.alloc_total_size);
  EXPECT_EQ(4u, stats.dealloc_count);
  EXPECT_EQ(expected_total_size, stats.dealloc_total_size);
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingAligned) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  // Aligned allocations take different paths depending on whether they are (in
  // the same order as the test cases below):
  // - Not really aligned (since alignment is always good enough)
  // - Already satisfied by PA's alignment guarantees
  // - Requiring extra padding
  // - Already satisfied by PA's alignment guarantees
  // - In need of a special slot span (very large alignment)
  // - Direct-mapped with large alignment
  size_t alloc_count = 0;
  size_t total_size = 0;
  size_t size_alignments[][2] = {{128, 4},
                                 {128, 128},
                                 {1024, 128},
                                 {128, 1024},
                                 {128, 2 * internal::PartitionPageSize()},
                                 {(4 << 20) + 1, 1 << 19}};
  for (auto [requested_size, alignment] : size_alignments) {
    void* ptr = root()->AlignedAlloc(alignment, requested_size);
    ASSERT_TRUE(ptr);
    alloc_count++;
    total_size += root()->GetUsableSize(ptr);
    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_size, tcache->thread_alloc_stats().alloc_total_size);
    root()->Free(ptr);
    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_size, tcache->thread_alloc_stats().dealloc_total_size);
  }

  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);

  auto stats = internal::GetAllocStatsForCurrentThread();
  EXPECT_EQ(alloc_count, stats.alloc_count);
  EXPECT_EQ(total_size, stats.alloc_total_size);
  EXPECT_EQ(alloc_count, stats.dealloc_count);
  EXPECT_EQ(total_size, stats.dealloc_total_size);
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingRealloc) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  size_t alloc_count = 0;
  size_t dealloc_count = 0;
  size_t total_alloc_size = 0;
  size_t total_dealloc_size = 0;
  size_t size_new_sizes[][2] = {
      {16, 15},
      {16, 64},
      {16, internal::PartitionPageSize() + 1},
      {4 << 20, 8 << 20},
      {8 << 20, 4 << 20},
      {(8 << 20) - internal::SystemPageSize(), 8 << 20}};
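  // Each Realloc() should be recorded as one deallocation of the old usable
  // size plus one allocation of the new usable size.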
  for (auto [size, new_size] : size_new_sizes) {
    void* ptr = root()->Alloc(size);
    ASSERT_TRUE(ptr);
    alloc_count++;
    size_t usable_size = root()->GetUsableSize(ptr);
    total_alloc_size += usable_size;

    ptr = root()->Realloc(ptr, new_size, "");
    ASSERT_TRUE(ptr);
    total_dealloc_size += usable_size;
    dealloc_count++;
    usable_size = root()->GetUsableSize(ptr);
    total_alloc_size += usable_size;
    alloc_count++;

    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_dealloc_size,
              tcache->thread_alloc_stats().dealloc_total_size)
        << new_size;

    root()->Free(ptr);
    dealloc_count++;
    total_dealloc_size += usable_size;

    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_dealloc_size,
              tcache->thread_alloc_stats().dealloc_total_size);
  }
  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, near the start of
// Chrome, once feature flags have been initialized.
TEST(AlternateBucketDistributionTest, SwitchBeforeAlloc) {
  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
  PartitionRoot* root = allocator->root();

  root->SwitchToDenserBucketDistribution();
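  // Pick a size that lands in different buckets under the neutral and denser
  // distributions, as checked below.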
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));

  void* ptr = root->Alloc(n);

  root->ResetBucketDistributionForTesting();

  root->Free(ptr);
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, near the start of
// Chrome, once feature flags have been initialized.
TEST(AlternateBucketDistributionTest, SwitchAfterAlloc) {
  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));

  PartitionRoot* root = allocator->root();
  void* ptr = root->Alloc(n);

  root->SwitchToDenserBucketDistribution();

  void* ptr2 = root->Alloc(n);

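  // ptr was allocated under the neutral distribution and ptr2 under the denser
  // one; freeing both after the switch must still work.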
  root->Free(ptr2);
  root->Free(ptr);
}

}  // namespace partition_alloc

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
        // PA_CONFIG(THREAD_CACHE_SUPPORTED)