1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "partition_alloc/partition_alloc_for_testing.h"
6
7 #include <algorithm>
8 #include <bit>
9 #include <cstddef>
10 #include <cstdint>
11 #include <cstdlib>
12 #include <cstring>
13 #include <iostream>
14 #include <limits>
15 #include <memory>
16 #include <random>
17 #include <set>
18 #include <tuple>
19 #include <vector>
20
21 #include "base/system/sys_info.h"
22 #include "base/test/gtest_util.h"
23 #include "build/build_config.h"
24 #include "partition_alloc/address_space_randomization.h"
25 #include "partition_alloc/chromecast_buildflags.h"
26 #include "partition_alloc/dangling_raw_ptr_checks.h"
27 #include "partition_alloc/freeslot_bitmap.h"
28 #include "partition_alloc/lightweight_quarantine.h"
29 #include "partition_alloc/memory_reclaimer.h"
30 #include "partition_alloc/page_allocator_constants.h"
31 #include "partition_alloc/partition_address_space.h"
32 #include "partition_alloc/partition_alloc_base/bits.h"
33 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
34 #include "partition_alloc/partition_alloc_base/cpu.h"
35 #include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
36 #include "partition_alloc/partition_alloc_base/logging.h"
37 #include "partition_alloc/partition_alloc_base/numerics/checked_math.h"
38 #include "partition_alloc/partition_alloc_base/rand_util.h"
39 #include "partition_alloc/partition_alloc_base/thread_annotations.h"
40 #include "partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
41 #include "partition_alloc/partition_alloc_buildflags.h"
42 #include "partition_alloc/partition_alloc_config.h"
43 #include "partition_alloc/partition_alloc_constants.h"
44 #include "partition_alloc/partition_alloc_forward.h"
45 #include "partition_alloc/partition_bucket.h"
46 #include "partition_alloc/partition_cookie.h"
47 #include "partition_alloc/partition_freelist_entry.h"
48 #include "partition_alloc/partition_page.h"
49 #include "partition_alloc/partition_ref_count.h"
50 #include "partition_alloc/partition_root.h"
51 #include "partition_alloc/partition_stats.h"
52 #include "partition_alloc/reservation_offset_table.h"
53 #include "partition_alloc/tagging.h"
54 #include "partition_alloc/thread_isolation/thread_isolation.h"
55 #include "testing/gtest/include/gtest/gtest.h"
56
57 #if defined(__ARM_FEATURE_MEMORY_TAGGING)
58 #include <arm_acle.h>
59 #endif
60
61 #if BUILDFLAG(IS_POSIX)
62 #if BUILDFLAG(IS_LINUX)
63 // We need PKEY_DISABLE_WRITE in this file; glibc defines it in sys/mman.h but
64 // it's actually Linux-specific and other Linux libcs define it in linux/mman.h.
65 // We have to include both to be sure we get the definition.
66 #include <linux/mman.h>
67 #endif // BUILDFLAG(IS_LINUX)
68 #include <sys/mman.h>
69 #include <sys/resource.h>
70 #include <sys/time.h>
71 #endif // BUILDFLAG(IS_POSIX)
72
73 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
74 #include <OpenCL/opencl.h>
75 #endif
76
77 #if BUILDFLAG(IS_MAC)
78 #include "partition_alloc/partition_alloc_base/mac/mac_util.h"
79 #endif
80
81 #if BUILDFLAG(ENABLE_PKEYS)
82 #include <sys/syscall.h>
83 #endif
84
85 // In the MTE world, the upper bits of a pointer can be decorated with a tag,
86 // thus allowing many versions of the same pointer to exist. These macros take
87 // that into account when comparing.
88 #define PA_EXPECT_PTR_EQ(ptr1, ptr2) \
89 { EXPECT_EQ(UntagPtr(ptr1), UntagPtr(ptr2)); }
90 #define PA_EXPECT_PTR_NE(ptr1, ptr2) \
91 { EXPECT_NE(UntagPtr(ptr1), UntagPtr(ptr2)); }
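// For illustration (hypothetical MTE-style addresses): if ptr1 is
// 0x0100'0000'2000 and ptr2 is 0x0200'0000'2000, i.e. the same slot observed
// through two different tags, PA_EXPECT_PTR_EQ(ptr1, ptr2) passes because both
// untag to 0x0000'0000'2000, whereas a plain EXPECT_EQ on the raw pointer
// values would fail.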
92
93 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
94
95 namespace {
96
bool IsLargeMemoryDevice() {
// Treat any device with 4GiB or more of physical memory as a "large memory
// device". We check for slightly less than 4GiB so that devices with a small
// amount of memory not accessible to the OS still count as "large".
101 //
102 // Set to 4GiB, since we have 2GiB Android devices where tests flakily fail
103 // (e.g. Nexus 5X, crbug.com/1191195).
104 return base::SysInfo::AmountOfPhysicalMemory() >= 4000ULL * 1024 * 1024;
105 }
106
bool SetAddressSpaceLimit() {
108 #if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
109 // 32 bits => address space is limited already.
110 return true;
111 #elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)
112 // macOS will accept, but not enforce, |RLIMIT_AS| changes. See
113 // https://crbug.com/435269 and rdar://17576114.
114 //
115 // Note: This number must be not less than 6 GB, because with
116 // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See
117 // https://crbug.com/674665.
118 const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
119 struct rlimit limit;
120 if (getrlimit(RLIMIT_DATA, &limit) != 0) {
121 return false;
122 }
123 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
124 limit.rlim_cur = kAddressSpaceLimit;
125 if (setrlimit(RLIMIT_DATA, &limit) != 0) {
126 return false;
127 }
128 }
129 return true;
130 #else
131 return false;
132 #endif
133 }
134
bool ClearAddressSpaceLimit() {
136 #if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
137 return true;
138 #elif BUILDFLAG(IS_POSIX)
139 struct rlimit limit;
140 if (getrlimit(RLIMIT_DATA, &limit) != 0) {
141 return false;
142 }
143 limit.rlim_cur = limit.rlim_max;
144 if (setrlimit(RLIMIT_DATA, &limit) != 0) {
145 return false;
146 }
147 return true;
148 #else
149 return false;
150 #endif
151 }
152
153 const size_t kTestSizes[] = {
154 1,
155 17,
156 100,
157 partition_alloc::internal::SystemPageSize(),
158 partition_alloc::internal::SystemPageSize() + 1,
159 partition_alloc::PartitionRoot::GetDirectMapSlotSize(100),
160 1 << 20,
161 1 << 21,
162 };
163 constexpr size_t kTestSizesCount = std::size(kTestSizes);
164
165 template <
166 partition_alloc::AllocFlags alloc_flags,
167 partition_alloc::FreeFlags free_flags = partition_alloc::FreeFlags::kNone>
void AllocateRandomly(partition_alloc::PartitionRoot* root, size_t count) {
169 std::vector<void*> allocations(count, nullptr);
170 for (size_t i = 0; i < count; ++i) {
171 const size_t size =
172 kTestSizes[partition_alloc::internal::base::RandGenerator(
173 kTestSizesCount)];
174 allocations[i] = root->Alloc<alloc_flags>(size);
175 EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
176 }
177
178 for (size_t i = 0; i < count; ++i) {
179 if (allocations[i]) {
180 root->Free(allocations[i]);
181 }
182 }
183 }
184
void HandleOOM(size_t unused_size) {
186 PA_LOG(FATAL) << "Out of memory";
187 }
188
189 int g_dangling_raw_ptr_detected_count = 0;
190 int g_dangling_raw_ptr_released_count = 0;
191
192 class CountDanglingRawPtr {
193 public:
CountDanglingRawPtr() {
195 g_dangling_raw_ptr_detected_count = 0;
196 g_dangling_raw_ptr_released_count = 0;
197 old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
198 old_released_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
199
200 partition_alloc::SetDanglingRawPtrDetectedFn(
201 CountDanglingRawPtr::DanglingRawPtrDetected);
202 partition_alloc::SetDanglingRawPtrReleasedFn(
203 CountDanglingRawPtr::DanglingRawPtrReleased);
204 }
~CountDanglingRawPtr() {
206 partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
207 partition_alloc::SetDanglingRawPtrReleasedFn(old_released_fn_);
208 }
209
210 private:
static void DanglingRawPtrDetected(uintptr_t) {
212 g_dangling_raw_ptr_detected_count++;
213 }
static void DanglingRawPtrReleased(uintptr_t) {
215 g_dangling_raw_ptr_released_count++;
216 }
217
218 partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
219 partition_alloc::DanglingRawPtrReleasedFn* old_released_fn_;
220 };
221
222 } // namespace
223
224 // Note: This test exercises interfaces inside the `partition_alloc`
225 // namespace, but inspects objects inside `partition_alloc::internal`.
226 // For ease of reading, the tests are placed into the latter namespace.
227 namespace partition_alloc::internal {
228
229 using BucketDistribution = PartitionRoot::BucketDistribution;
230 using SlotSpan = SlotSpanMetadata;
231
232 const size_t kTestAllocSize = 16;
233
234 #if !BUILDFLAG(PA_DCHECK_IS_ON)
235 const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
236 const size_t kExtraAllocSizeWithoutRefCount = 0ull;
237 #else
238 const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
239 const size_t kExtraAllocSizeWithoutRefCount = kCookieSize;
240 #endif
241
242 const char* type_name = nullptr;
243
void SetDistributionForPartitionRoot(PartitionRoot* root,
                                     BucketDistribution distribution) {
246 switch (distribution) {
247 case BucketDistribution::kNeutral:
248 root->ResetBucketDistributionForTesting();
249 break;
250 case BucketDistribution::kDenser:
251 root->SwitchToDenserBucketDistribution();
252 break;
253 }
254 }
255
256 struct PartitionAllocTestParam {
257 BucketDistribution bucket_distribution;
258 bool use_pkey_pool;
259 size_t ref_count_size;
260 };
261
const std::vector<PartitionAllocTestParam> GetPartitionAllocTestParams() {
263 std::vector<size_t> ref_count_sizes = {0, 8, 16};
264 // sizeof(PartitionRefCount) == 8 under some configurations, so we can't force
265 // the size down to 4.
266 #if !PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) && \
267 !PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
268 !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
269 ref_count_sizes.push_back(4);
270 #endif
// Using the MTE or Mac13 workaround increases the extras size without
// increasing sizeof(PartitionRefCount), so we don't have to exclude it here,
// as long as ExtraAllocSize() accounts for it.
274
275 std::vector<PartitionAllocTestParam> params;
276 for (size_t ref_count_size : ref_count_sizes) {
277 params.emplace_back(PartitionAllocTestParam{BucketDistribution::kNeutral,
278 false, ref_count_size});
279 params.emplace_back(PartitionAllocTestParam{BucketDistribution::kDenser,
280 false, ref_count_size});
281 #if BUILDFLAG(ENABLE_PKEYS)
282 if (CPUHasPkeySupport()) {
283 params.emplace_back(PartitionAllocTestParam{BucketDistribution::kNeutral,
284 true, ref_count_size});
285 params.emplace_back(PartitionAllocTestParam{BucketDistribution::kDenser,
286 true, ref_count_size});
287 }
288 #endif
289 }
290 return params;
291 }
292
293 class PartitionAllocTest
294 : public testing::TestWithParam<PartitionAllocTestParam> {
295 protected:
296 class ScopedPageAllocation {
297 public:
ScopedPageAllocation(PartitionAllocator& allocator,
                     base::CheckedNumeric<size_t> npages)
300 : allocator_(allocator),
301 npages_(npages),
302 ptr_(static_cast<char*>(allocator_.root()->Alloc(
303 (npages * SystemPageSize() - ExtraAllocSize(allocator_))
304 .ValueOrDie(),
305 type_name))) {}
306
~ScopedPageAllocation() { allocator_.root()->Free(ptr_); }
308
void TouchAllPages() {
310 memset(ptr_, 'A',
311 ((npages_ * SystemPageSize()) - ExtraAllocSize(allocator_))
312 .ValueOrDie());
313 }
314
void* PageAtIndex(size_t index) {
316 return ptr_ - kPointerOffset + (SystemPageSize() * index);
317 }
318
319 private:
320 PartitionAllocator& allocator_;
321 const base::CheckedNumeric<size_t> npages_;
322 char* ptr_;
323 };
324
325 PartitionAllocTest() = default;
326
327 ~PartitionAllocTest() override = default;
328
329 struct PartitionTestOptions {
330 bool use_memory_reclaimer = false;
331 bool uncap_empty_slot_span_memory = false;
332 bool set_bucket_distribution = false;
333 };
334
void InitializeTestRoot(PartitionRoot* root,
                        PartitionOptions opts,
                        PartitionTestOptions test_opts) {
338 root->Init(opts);
339 if (test_opts.use_memory_reclaimer) {
340 MemoryReclaimer::Instance()->RegisterPartition(root);
341 }
342 if (test_opts.uncap_empty_slot_span_memory) {
343 root->UncapEmptySlotSpanMemoryForTesting();
344 }
345 if (test_opts.set_bucket_distribution) {
346 SetDistributionForPartitionRoot(root, GetBucketDistribution());
347 }
348 }
349
std::unique_ptr<PartitionRoot> CreateCustomTestRoot(
    PartitionOptions opts,
    PartitionTestOptions test_opts) {
353 auto root = std::make_unique<PartitionRoot>();
354 InitializeTestRoot(root.get(), opts, test_opts);
355 return root;
356 }
357
PartitionOptions GetCommonPartitionOptions() {
359 PartitionOptions opts;
360 opts.ref_count_size = GetParam().ref_count_size;
361 opts.zapping_by_free_flags = PartitionOptions::kEnabled;
362 return opts;
363 }
364
void InitializeMainTestAllocators() {
366 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
367 PartitionOptions::EnableToggle enable_backup_ref_ptr =
368 PartitionOptions::kEnabled;
369 #endif
370 #if BUILDFLAG(ENABLE_PKEYS)
371 int pkey = PkeyAlloc(UseThreadIsolatedPool() ? 0 : PKEY_DISABLE_WRITE);
372 if (pkey != -1) {
373 pkey_ = pkey;
374 }
375
376 PartitionOptions pkey_opts = GetCommonPartitionOptions();
377 pkey_opts.aligned_alloc = PartitionOptions::kAllowed;
378 pkey_opts.thread_isolation = ThreadIsolationOption(pkey_);
// We always want to have a pkey allocator initialized to make sure that the
// other pools still work. As part of the initialization, we tag some memory
// with the new pkey, effectively making it read-only. So there's some
// potential for breakage that this should catch.
383 InitializeTestRoot(pkey_allocator.root(), pkey_opts,
384 PartitionTestOptions{.use_memory_reclaimer = true});
385
386 ThreadIsolationOption thread_isolation_opt;
387 if (UseThreadIsolatedPool() && pkey_ != kInvalidPkey) {
388 thread_isolation_opt = ThreadIsolationOption(pkey_);
389 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
390 // BRP and thread isolated mode use different pools, so they can't be
391 // enabled at the same time.
392 enable_backup_ref_ptr = PartitionOptions::kDisabled;
393 #endif
394 }
395 #endif // BUILDFLAG(ENABLE_PKEYS)
396
397 PartitionOptions opts = GetCommonPartitionOptions();
398 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
399 BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
400 // AlignedAlloc() can't be called when BRP is in the
401 // "before allocation" mode, because this mode adds extras before
402 // the allocation. Extras after the allocation are ok.
403 opts.aligned_alloc = PartitionOptions::kAllowed;
404 #endif
405 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
406 opts.backup_ref_ptr = enable_backup_ref_ptr;
407 #endif
408 #if BUILDFLAG(ENABLE_PKEYS)
409 opts.thread_isolation = thread_isolation_opt;
410 #endif
411 #if PA_CONFIG(HAS_MEMORY_TAGGING)
412 opts.memory_tagging = {
413 .enabled =
414 partition_alloc::internal::base::CPU::GetInstanceNoAllocation()
415 .has_mte()
416 ? PartitionOptions::kEnabled
417 : PartitionOptions::kDisabled,
418 };
419 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
420 InitializeTestRoot(
421 allocator.root(), opts,
422 PartitionTestOptions{.use_memory_reclaimer = true,
423 .uncap_empty_slot_span_memory = true,
424 .set_bucket_distribution = true});
425
426 PartitionOptions aligned_opts = GetCommonPartitionOptions();
427 aligned_opts.aligned_alloc = PartitionOptions::kAllowed;
428 InitializeTestRoot(
429 aligned_allocator.root(), aligned_opts,
430 PartitionTestOptions{.use_memory_reclaimer = true,
431 .uncap_empty_slot_span_memory = true,
432 .set_bucket_distribution = true});
433 }
434
size_t RealAllocSize() const {
436 return partition_alloc::internal::base::bits::AlignUp(
437 kTestAllocSize + ExtraAllocSize(allocator), kAlignment);
438 }
439
void SetUp() override {
441 PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
442 StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning);
443 PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(true);
444 PartitionRoot::SetSortActiveSlotSpansEnabled(true);
445 PartitionAllocGlobalInit(HandleOOM);
446 InitializeMainTestAllocators();
447
448 test_bucket_index_ = SizeToIndex(RealAllocSize());
449 }
450
size_t SizeToIndex(size_t size) {
452 const auto distribution_to_use = GetBucketDistribution();
453 return PartitionRoot::SizeToBucketIndex(size, distribution_to_use);
454 }
455
size_t SizeToBucketSize(size_t size) {
457 const auto index = SizeToIndex(size);
458 return allocator.root()->buckets[index].slot_size;
459 }
460
void TearDown() override {
462 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
463 PurgeFlags::kDiscardUnusedSystemPages);
464 PartitionAllocGlobalUninitForTesting();
465 #if BUILDFLAG(ENABLE_PKEYS)
466 if (pkey_ != kInvalidPkey) {
467 PkeyFree(pkey_);
468 }
469 #endif
470 }
471
static size_t ExtraAllocSize(const PartitionAllocator& allocator) {
473 size_t ref_count_size = 0;
474 // Duplicate the logic from PartitionRoot::Init().
475 if (allocator.root()->brp_enabled()) {
476 ref_count_size = GetParam().ref_count_size;
477 if (!ref_count_size) {
478 ref_count_size = kPartitionRefCountSizeAdjustment;
479 }
480 ref_count_size = AlignUpRefCountSizeForMac(ref_count_size);
481 #if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
482 if (allocator.root()->IsMemoryTaggingEnabled()) {
483 ref_count_size = partition_alloc::internal::base::bits::AlignUp(
484 ref_count_size, kMemTagGranuleSize);
485 }
486 #endif // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
487 }
488 return kExtraAllocSizeWithoutRefCount + ref_count_size;
489 }
490
size_t GetNumPagesPerSlotSpan(size_t size) {
492 size_t real_size = size + ExtraAllocSize(allocator);
493 size_t bucket_index = SizeToIndex(real_size);
494 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
// TODO(tasak): make get_pages_per_slot_span() available to
// partition_alloc_unittest.cc. Is it acceptable to move the code from
// partition_bucket.cc into partition_bucket.h?
498 return (bucket->num_system_pages_per_slot_span +
499 (NumSystemPagesPerPartitionPage() - 1)) /
500 NumSystemPagesPerPartitionPage();
501 }
502
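// Fills an entire slot span with allocations of |size| and returns it. On
// return, the span is expected to be full and at the head of its bucket's
// active slot span list.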
SlotSpan* GetFullSlotSpan(size_t size) {
504 size_t real_size = size + ExtraAllocSize(allocator);
505 size_t bucket_index = SizeToIndex(real_size);
506 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
507 size_t num_slots =
508 (bucket->num_system_pages_per_slot_span * SystemPageSize()) /
509 bucket->slot_size;
510 uintptr_t first = 0;
511 uintptr_t last = 0;
512 size_t i;
513 for (i = 0; i < num_slots; ++i) {
514 void* ptr = allocator.root()->Alloc(size, type_name);
515 EXPECT_TRUE(ptr);
516 if (!i) {
517 first = allocator.root()->ObjectToSlotStart(ptr);
518 } else if (i == num_slots - 1) {
519 last = allocator.root()->ObjectToSlotStart(ptr);
520 }
521 }
522 EXPECT_EQ(SlotSpan::FromSlotStart(first), SlotSpan::FromSlotStart(last));
523 if (bucket->num_system_pages_per_slot_span ==
524 NumSystemPagesPerPartitionPage()) {
525 EXPECT_EQ(first & PartitionPageBaseMask(),
526 last & PartitionPageBaseMask());
527 }
528 EXPECT_EQ(num_slots, bucket->active_slot_spans_head->num_allocated_slots);
529 EXPECT_EQ(nullptr, bucket->active_slot_spans_head->get_freelist_head());
530 EXPECT_TRUE(bucket->is_valid());
531 EXPECT_TRUE(bucket->active_slot_spans_head !=
532 SlotSpan::get_sentinel_slot_span());
533 EXPECT_TRUE(bucket->active_slot_spans_head->is_full());
534 return bucket->active_slot_spans_head;
535 }
536
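// Allocates and frees a single object of |size| kMaxFreeableSpans times, so
// that the corresponding slot span cycles through the empty-slot-span cache
// on every iteration.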
void CycleFreeCache(size_t size) {
538 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
539 void* ptr = allocator.root()->Alloc(size, type_name);
540 auto* slot_span =
541 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
542 auto* bucket = slot_span->bucket;
543 EXPECT_EQ(1u, bucket->active_slot_spans_head->num_allocated_slots);
544 allocator.root()->Free(ptr);
545 EXPECT_EQ(0u, bucket->active_slot_spans_head->num_allocated_slots);
546 EXPECT_TRUE(bucket->active_slot_spans_head->in_empty_cache() ||
547 bucket->active_slot_spans_head ==
548 SlotSpanMetadata::get_sentinel_slot_span());
549 }
550 }
551
552 enum ReturnNullTestMode {
553 kPartitionAlloc,
554 kPartitionRealloc,
555 };
556
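// Keeps allocating with AllocFlags::kReturnNull under an artificially lowered
// address space limit until an allocation fails, then verifies that freeing
// and re-allocating still work. Always finishes with
// PA_LOG(FATAL) << "Passed DoReturnNullTest", so callers must wrap it in a
// death-test expectation.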
void DoReturnNullTest(size_t alloc_size, ReturnNullTestMode mode) {
558 // TODO(crbug.com/678782): Where necessary and possible, disable the
559 // platform's OOM-killing behavior. OOM-killing makes this test flaky on
560 // low-memory devices.
561 if (!IsLargeMemoryDevice()) {
562 PA_LOG(WARNING)
563 << "Skipping test on this device because of crbug.com/678782";
564 PA_LOG(FATAL) << "Passed DoReturnNullTest";
565 }
566
567 ASSERT_TRUE(SetAddressSpaceLimit());
568
// Work out the number of allocations that covers 6 GB of memory:
// 6 * 1024 * 1024 KiB divided by the allocation size expressed in KiB.
570 const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);
571
572 void** ptrs = static_cast<void**>(
573 allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
574 int i;
575
576 for (i = 0; i < num_allocations; ++i) {
577 switch (mode) {
578 case kPartitionAlloc: {
579 ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
580 type_name);
581 break;
582 }
583 case kPartitionRealloc: {
584 ptrs[i] =
585 allocator.root()->Alloc<AllocFlags::kReturnNull>(1, type_name);
586 ptrs[i] = allocator.root()->Realloc<AllocFlags::kReturnNull>(
587 ptrs[i], alloc_size, type_name);
588 break;
589 }
590 }
591
592 if (!i) {
593 EXPECT_TRUE(ptrs[0]);
594 }
595 if (!ptrs[i]) {
596 ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
597 type_name);
598 EXPECT_FALSE(ptrs[i]);
599 break;
600 }
601 }
602
603 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
604 // we're not actually testing anything here.
605 EXPECT_LT(i, num_allocations);
606
607 // Free, reallocate and free again each block we allocated. We do this to
608 // check that freeing memory also works correctly after a failed allocation.
609 for (--i; i >= 0; --i) {
610 allocator.root()->Free(ptrs[i]);
611 ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
612 type_name);
613 EXPECT_TRUE(ptrs[i]);
614 allocator.root()->Free(ptrs[i]);
615 }
616
617 allocator.root()->Free(ptrs);
618
619 EXPECT_TRUE(ClearAddressSpaceLimit());
620 PA_LOG(FATAL) << "Passed DoReturnNullTest";
621 }
622
623 void RunRefCountReallocSubtest(size_t orig_size, size_t new_size);
624
PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t size) {
626 return allocator.root()->Alloc(size);
627 }
628
PA_NOINLINE void Free(void* ptr) { allocator.root()->Free(ptr); }
630
BucketDistribution GetBucketDistribution() const {
632 return GetParam().bucket_distribution;
633 }
634
bool UseThreadIsolatedPool() const { return GetParam().use_pkey_pool; }
bool UseBRPPool() const { return allocator.root()->brp_enabled(); }
637
638 partition_alloc::PartitionAllocatorForTesting allocator;
639 partition_alloc::PartitionAllocatorForTesting aligned_allocator;
640 #if BUILDFLAG(ENABLE_PKEYS)
641 partition_alloc::PartitionAllocatorForTesting pkey_allocator;
642 #endif
643 size_t test_bucket_index_;
644
645 #if BUILDFLAG(ENABLE_PKEYS)
646 int pkey_ = kInvalidPkey;
647 #endif
648 };
649
650 // Death tests misbehave on Android, http://crbug.com/643760.
651 #if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
652 #define PA_HAS_DEATH_TESTS
653
654 class PartitionAllocDeathTest : public PartitionAllocTest {};
655
656 INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
657 PartitionAllocDeathTest,
658 testing::ValuesIn(GetPartitionAllocTestParams()));
659
660 #endif
661
662 namespace {
663
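// Frees every slot of a full |slot_span|, leaving it empty.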
void FreeFullSlotSpan(PartitionRoot* root, SlotSpan* slot_span) {
665 EXPECT_TRUE(slot_span->is_full());
666 size_t size = slot_span->bucket->slot_size;
667 size_t num_slots =
668 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
669 size;
670 EXPECT_EQ(num_slots, slot_span->num_allocated_slots);
671 uintptr_t address = SlotSpan::ToSlotSpanStart(slot_span);
672 size_t i;
673 for (i = 0; i < num_slots; ++i) {
674 root->Free(root->SlotStartToObject(address));
675 address += size;
676 }
677 EXPECT_TRUE(slot_span->is_empty());
678 }
679
680 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
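// Returns true iff the residency of the system page at |ptr|, as reported by
// mincore(), matches |in_core|.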
bool CheckPageInCore(void* ptr, bool in_core) {
682 unsigned char ret = 0;
683 EXPECT_EQ(0, mincore(ptr, SystemPageSize(), &ret));
684 return in_core == (ret & 1);
685 }
686
687 #define CHECK_PAGE_IN_CORE(ptr, in_core) \
688 EXPECT_TRUE(CheckPageInCore(ptr, in_core))
689 #else
690 #define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
691 #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
692
693 class MockPartitionStatsDumper : public PartitionStatsDumper {
694 public:
695 MockPartitionStatsDumper() = default;
696
void PartitionDumpTotals(const char* partition_name,
                         const PartitionMemoryStats* stats) override {
699 EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
700 EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
701 EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
702 EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
703 EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
704 }
705
void PartitionsDumpBucketStats(
707 [[maybe_unused]] const char* partition_name,
708 const PartitionBucketMemoryStats* stats) override {
709 EXPECT_TRUE(stats->is_valid);
710 EXPECT_EQ(0u, stats->bucket_slot_size & sizeof(void*));
711 bucket_stats.push_back(*stats);
712 total_resident_bytes += stats->resident_bytes;
713 total_active_bytes += stats->active_bytes;
714 total_decommittable_bytes += stats->decommittable_bytes;
715 total_discardable_bytes += stats->discardable_bytes;
716 }
717
bool IsMemoryAllocationRecorded() {
719 return total_resident_bytes != 0 && total_active_bytes != 0;
720 }
721
const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
723 for (auto& stat : bucket_stats) {
724 if (stat.bucket_slot_size == bucket_size) {
725 return &stat;
726 }
727 }
728 return nullptr;
729 }
730
731 private:
732 size_t total_resident_bytes = 0;
733 size_t total_active_bytes = 0;
734 size_t total_decommittable_bytes = 0;
735 size_t total_discardable_bytes = 0;
736
737 std::vector<PartitionBucketMemoryStats> bucket_stats;
738 };
739
740 } // namespace
741
742 INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
743 PartitionAllocTest,
744 testing::ValuesIn(GetPartitionAllocTestParams()));
745
746 // Check that the most basic of allocate / free pairs work.
TEST_P(PartitionAllocTest, Basic) {
748 PartitionRoot::Bucket* bucket =
749 &allocator.root()->buckets[test_bucket_index_];
750 auto* seed_slot_span = SlotSpan::get_sentinel_slot_span();
751
752 EXPECT_FALSE(bucket->empty_slot_spans_head);
753 EXPECT_FALSE(bucket->decommitted_slot_spans_head);
754 EXPECT_EQ(seed_slot_span, bucket->active_slot_spans_head);
755 EXPECT_EQ(nullptr, bucket->active_slot_spans_head->next_slot_span);
756
757 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
758 EXPECT_TRUE(ptr);
759 EXPECT_EQ(kPointerOffset, UntagPtr(ptr) & PartitionPageOffsetMask());
760 // Check that the offset appears to include a guard page.
761 EXPECT_EQ(PartitionPageSize() +
762 partition_alloc::internal::ReservedFreeSlotBitmapSize() +
763 kPointerOffset,
764 UntagPtr(ptr) & kSuperPageOffsetMask);
765
766 allocator.root()->Free(ptr);
767 // Expect that the last active slot span gets noticed as empty but doesn't get
768 // decommitted.
769 EXPECT_TRUE(bucket->empty_slot_spans_head);
770 EXPECT_FALSE(bucket->decommitted_slot_spans_head);
771 }
772
773 // Test multiple allocations, and freelist handling.
TEST_P(PartitionAllocTest, MultiAlloc) {
775 void* ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
776 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
777 EXPECT_TRUE(ptr1);
778 EXPECT_TRUE(ptr2);
779 ptrdiff_t diff = UntagPtr(ptr2) - UntagPtr(ptr1);
780 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
781
782 // Check that we re-use the just-freed slot.
783 allocator.root()->Free(ptr2);
784 ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
785 EXPECT_TRUE(ptr2);
786 diff = UntagPtr(ptr2) - UntagPtr(ptr1);
787 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
788 allocator.root()->Free(ptr1);
789 ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
790 EXPECT_TRUE(ptr1);
791 diff = UntagPtr(ptr2) - UntagPtr(ptr1);
792 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
793
794 void* ptr3 = allocator.root()->Alloc(kTestAllocSize, type_name);
795 EXPECT_TRUE(ptr3);
796 diff = UntagPtr(ptr3) - UntagPtr(ptr1);
797 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize() * 2), diff);
798
799 allocator.root()->Free(ptr1);
800 allocator.root()->Free(ptr2);
801 allocator.root()->Free(ptr3);
802 }
803
804 // Test a bucket with multiple slot spans.
TEST_P(PartitionAllocTest, MultiSlotSpans) {
806 PartitionRoot::Bucket* bucket =
807 &allocator.root()->buckets[test_bucket_index_];
808
809 auto* slot_span = GetFullSlotSpan(kTestAllocSize);
810 FreeFullSlotSpan(allocator.root(), slot_span);
811 EXPECT_TRUE(bucket->empty_slot_spans_head);
812 EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
813 EXPECT_EQ(nullptr, slot_span->next_slot_span);
814 EXPECT_EQ(0u, slot_span->num_allocated_slots);
815
816 slot_span = GetFullSlotSpan(kTestAllocSize);
817 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
818
819 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
820 EXPECT_EQ(nullptr, slot_span2->next_slot_span);
821 EXPECT_EQ(SlotSpan::ToSlotSpanStart(slot_span) & kSuperPageBaseMask,
822 SlotSpan::ToSlotSpanStart(slot_span2) & kSuperPageBaseMask);
823
824 // Fully free the non-current slot span. This will leave us with no current
825 // active slot span because one is empty and the other is full.
826 FreeFullSlotSpan(allocator.root(), slot_span);
827 EXPECT_EQ(0u, slot_span->num_allocated_slots);
828 EXPECT_TRUE(bucket->empty_slot_spans_head);
829 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
830 bucket->active_slot_spans_head);
831
832 // Allocate a new slot span, it should pull from the freelist.
833 slot_span = GetFullSlotSpan(kTestAllocSize);
834 EXPECT_FALSE(bucket->empty_slot_spans_head);
835 EXPECT_EQ(slot_span, bucket->active_slot_spans_head);
836
837 FreeFullSlotSpan(allocator.root(), slot_span);
838 FreeFullSlotSpan(allocator.root(), slot_span2);
839 EXPECT_EQ(0u, slot_span->num_allocated_slots);
840 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
841 EXPECT_EQ(0u, slot_span2->num_unprovisioned_slots);
842 EXPECT_TRUE(slot_span2->in_empty_cache());
843 }
844
845 // Test some finer aspects of internal slot span transitions.
TEST_P(PartitionAllocTest, SlotSpanTransitions) {
847 PartitionRoot::Bucket* bucket =
848 &allocator.root()->buckets[test_bucket_index_];
849
850 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
851 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
852 EXPECT_EQ(nullptr, slot_span1->next_slot_span);
853 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
854 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
855 EXPECT_EQ(nullptr, slot_span2->next_slot_span);
856
857 // Bounce slot_span1 back into the non-full list then fill it up again.
858 void* ptr = allocator.root()->SlotStartToObject(
859 SlotSpan::ToSlotSpanStart(slot_span1));
860 allocator.root()->Free(ptr);
861 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
862 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
863 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
864 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head->next_slot_span);
865
866 // Allocating another slot span at this point should cause us to scan over
867 // slot_span1 (which is both full and NOT our current slot span), and evict it
// from the freelist. Older code had an O(n^2) condition due to failure to do
// this.
870 auto* slot_span3 = GetFullSlotSpan(kTestAllocSize);
871 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
872 EXPECT_EQ(nullptr, slot_span3->next_slot_span);
873
874 // Work out a pointer into slot_span2 and free it.
875 ptr = allocator.root()->SlotStartToObject(
876 SlotSpan::ToSlotSpanStart(slot_span2));
877 allocator.root()->Free(ptr);
878 // Trying to allocate at this time should cause us to cycle around to
879 // slot_span2 and find the recently freed slot.
880 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
881 PA_EXPECT_PTR_EQ(ptr, ptr2);
882 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
883 EXPECT_EQ(slot_span3, slot_span2->next_slot_span);
884
885 // Work out a pointer into slot_span1 and free it. This should pull the slot
886 // span back into the list of available slot spans.
887 ptr = allocator.root()->SlotStartToObject(
888 SlotSpan::ToSlotSpanStart(slot_span1));
889 allocator.root()->Free(ptr);
890 // This allocation should be satisfied by slot_span1.
891 ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
892 PA_EXPECT_PTR_EQ(ptr, ptr2);
893 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
894 EXPECT_EQ(slot_span2, slot_span1->next_slot_span);
895
896 FreeFullSlotSpan(allocator.root(), slot_span3);
897 FreeFullSlotSpan(allocator.root(), slot_span2);
898 FreeFullSlotSpan(allocator.root(), slot_span1);
899
900 // Allocating whilst in this state exposed a bug, so keep the test.
901 ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
902 allocator.root()->Free(ptr);
903 }
904
905 // Test that ExtraAllocSize() is exactly what PA takes away from the slot for
906 // extras.
TEST_P(PartitionAllocTest, ExtraAllocSize) {
// There is a bucket with exactly this slot size (asserted below).
909 size_t slot_size = 64;
910 size_t bucket_index =
911 allocator.root()->SizeToBucketIndex(slot_size, GetBucketDistribution());
912 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
913 ASSERT_EQ(bucket->slot_size, slot_size);
914
// The first allocation is expected to span exactly the capacity of the slot.
// The second one should overflow into a larger slot, and not fill its
// capacity.
918 size_t requested_size1 = slot_size - ExtraAllocSize(allocator);
919 size_t requested_size2 = requested_size1 + 1;
920 void* ptr1 = allocator.root()->Alloc(requested_size1);
921 void* ptr2 = allocator.root()->Alloc(requested_size2);
922 size_t capacity1 = allocator.root()->AllocationCapacityFromSlotStart(
923 allocator.root()->ObjectToSlotStart(ptr1));
924 size_t capacity2 = allocator.root()->AllocationCapacityFromSlotStart(
925 allocator.root()->ObjectToSlotStart(ptr2));
926 EXPECT_EQ(capacity1, requested_size1);
927 EXPECT_LT(capacity1, capacity2);
928 EXPECT_LT(requested_size2, capacity2);
929 allocator.root()->Free(ptr1);
930 allocator.root()->Free(ptr2);
931 }
932
TEST_P(PartitionAllocTest, PreferSlotSpansWithProvisionedEntries) {
934 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
935 size_t real_size = size + ExtraAllocSize(allocator);
936 size_t bucket_index =
937 allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
938 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
939 ASSERT_EQ(bucket->slot_size, real_size);
940 size_t slots_per_span = bucket->num_system_pages_per_slot_span;
941
942 // Make 10 full slot spans.
943 constexpr int kSpans = 10;
944 std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
945 for (int span_index = 0; span_index < kSpans; span_index++) {
946 for (size_t i = 0; i < slots_per_span; i++) {
947 allocated_memory_spans[span_index].push_back(
948 allocator.root()->Alloc(size));
949 }
950 }
951
952 // Reverse ordering, since a newly non-full span is placed at the head of the
953 // active list.
954 for (int span_index = kSpans - 1; span_index >= 0; span_index--) {
955 allocator.root()->Free(allocated_memory_spans[span_index].back());
956 allocated_memory_spans[span_index].pop_back();
957 }
958
959 // Since slot spans are large enough and we freed memory from the end, the
960 // slot spans become partially provisioned after PurgeMemory().
961 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
962 PurgeFlags::kDiscardUnusedSystemPages);
963 std::vector<SlotSpanMetadata*> active_slot_spans;
964 for (auto* span = bucket->active_slot_spans_head; span;
965 span = span->next_slot_span) {
966 active_slot_spans.push_back(span);
967 ASSERT_EQ(span->num_unprovisioned_slots, 1u);
968 // But no freelist entries.
969 ASSERT_FALSE(span->get_freelist_head());
970 }
971
972 // Free one entry in the middle span, creating a freelist entry.
973 constexpr size_t kSpanIndex = 5;
974 allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
975 allocated_memory_spans[kSpanIndex].pop_back();
976
977 ASSERT_TRUE(active_slot_spans[kSpanIndex]->get_freelist_head());
978 ASSERT_FALSE(bucket->active_slot_spans_head->get_freelist_head());
979
980 // It must come from the middle slot span even though the first one has
981 // unprovisioned space.
982 void* new_ptr = allocator.root()->Alloc(size);
983
984 // Comes from the middle slot span, since it has a freelist entry.
985 auto* new_active_slot_span = active_slot_spans[kSpanIndex];
986 ASSERT_FALSE(new_active_slot_span->get_freelist_head());
987
988 // The middle slot span was moved to the front.
989 active_slot_spans.erase(active_slot_spans.begin() + kSpanIndex);
990 active_slot_spans.insert(active_slot_spans.begin(), new_active_slot_span);
991
992 // Check slot span ordering.
993 int index = 0;
994 for (auto* span = bucket->active_slot_spans_head; span;
995 span = span->next_slot_span) {
996 EXPECT_EQ(span, active_slot_spans[index]);
997 index++;
998 }
999 EXPECT_EQ(index, kSpans);
1000
1001 allocator.root()->Free(new_ptr);
1002 for (int span_index = 0; span_index < kSpans; span_index++) {
1003 for (void* ptr : allocated_memory_spans[span_index]) {
1004 allocator.root()->Free(ptr);
1005 }
1006 }
1007 }
1008
1009 // Test some corner cases relating to slot span transitions in the internal
1010 // free slot span list metadata bucket.
TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
1012 PartitionRoot::Bucket* bucket =
1013 &allocator.root()->buckets[test_bucket_index_];
1014
1015 size_t num_to_fill_free_list_slot_span =
1016 PartitionPageSize() / (sizeof(SlotSpan) + ExtraAllocSize(allocator));
1017 // The +1 is because we need to account for the fact that the current slot
1018 // span never gets thrown on the freelist.
1019 ++num_to_fill_free_list_slot_span;
1020 auto slot_spans =
1021 std::make_unique<SlotSpan*[]>(num_to_fill_free_list_slot_span);
1022
1023 size_t i;
1024 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
1025 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
1026 }
1027 EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
1028 bucket->active_slot_spans_head);
1029 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
1030 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
1031 }
1032 EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
1033 EXPECT_TRUE(bucket->empty_slot_spans_head);
1034
1035 // Allocate / free in a different bucket size so we get control of a
1036 // different free slot span list. We need two slot spans because one will be
1037 // the last active slot span and not get freed.
1038 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize * 2);
1039 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize * 2);
1040 FreeFullSlotSpan(allocator.root(), slot_span1);
1041 FreeFullSlotSpan(allocator.root(), slot_span2);
1042
1043 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
1044 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
1045 }
1046 EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
1047 bucket->active_slot_spans_head);
1048
1049 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
1050 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
1051 }
1052 EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
1053 EXPECT_TRUE(bucket->empty_slot_spans_head);
1054 }
1055
1056 // Test a large series of allocations that cross more than one underlying
1057 // super page.
TEST_P(PartitionAllocTest, MultiPageAllocs) {
1059 size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
1060 // 1 super page has 2 guard partition pages and a tag bitmap.
1061 size_t num_slot_spans_needed =
1062 (NumPartitionPagesPerSuperPage() - 2 -
1063 partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
1064 num_pages_per_slot_span;
1065
1066 // We need one more slot span in order to cross super page boundary.
1067 ++num_slot_spans_needed;
1068
1069 EXPECT_GT(num_slot_spans_needed, 1u);
1070 auto slot_spans = std::make_unique<SlotSpan*[]>(num_slot_spans_needed);
1071 uintptr_t first_super_page_base = 0;
1072 size_t i;
1073 for (i = 0; i < num_slot_spans_needed; ++i) {
1074 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
1075 uintptr_t slot_span_start = SlotSpan::ToSlotSpanStart(slot_spans[i]);
1076 if (!i) {
1077 first_super_page_base = slot_span_start & kSuperPageBaseMask;
1078 }
1079 if (i == num_slot_spans_needed - 1) {
1080 uintptr_t second_super_page_base = slot_span_start & kSuperPageBaseMask;
1081 uintptr_t second_super_page_offset =
1082 slot_span_start & kSuperPageOffsetMask;
1083 EXPECT_FALSE(second_super_page_base == first_super_page_base);
// Check that we allocated a guard page and the reserved tag bitmap for
// the second super page.
1086 EXPECT_EQ(PartitionPageSize() +
1087 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
1088 second_super_page_offset);
1089 }
1090 }
1091 for (i = 0; i < num_slot_spans_needed; ++i) {
1092 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
1093 }
1094 }
1095
1096 // Test the generic allocation functions that can handle arbitrary sizes and
1097 // reallocing etc.
TEST_P(PartitionAllocTest, Alloc) {
1099 void* ptr = allocator.root()->Alloc(1, type_name);
1100 EXPECT_TRUE(ptr);
1101 allocator.root()->Free(ptr);
1102 ptr = allocator.root()->Alloc(kMaxBucketed + 1, type_name);
1103 EXPECT_TRUE(ptr);
1104 allocator.root()->Free(ptr);
1105
// To make both alloc(x + 1) and alloc(x + kSmallestBucket) allocate from the
// same bucket, we need
//   partition_alloc::internal::base::bits::AlignUp(
//       1 + x + ExtraAllocSize(allocator), kAlignment) ==
//   partition_alloc::internal::base::bits::AlignUp(
//       kSmallestBucket + x + ExtraAllocSize(allocator), kAlignment),
// because slot_size is a multiple of kAlignment. So (x +
// ExtraAllocSize(allocator)) must be a multiple of kAlignment, i.e.
//   x = partition_alloc::internal::base::bits::AlignUp(
//           ExtraAllocSize(allocator), kAlignment) - ExtraAllocSize(allocator);
1115 size_t base_size = partition_alloc::internal::base::bits::AlignUp(
1116 ExtraAllocSize(allocator), kAlignment) -
1117 ExtraAllocSize(allocator);
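// Worked example with hypothetical values (ExtraAllocSize == 4,
// kAlignment == 16, kSmallestBucket == 16): base_size = AlignUp(4, 16) - 4
// = 12, so both 12 + 1 and 12 + 16 align up to the same 32-byte slot once
// the 4 extra bytes are added back.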
1118 ptr = allocator.root()->Alloc(base_size + 1, type_name);
1119 EXPECT_TRUE(ptr);
1120 void* orig_ptr = ptr;
1121 char* char_ptr = static_cast<char*>(ptr);
1122 *char_ptr = 'A';
1123
1124 // Change the size of the realloc, remaining inside the same bucket.
1125 void* new_ptr = allocator.root()->Realloc(ptr, base_size + 2, type_name);
1126 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1127 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1128 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1129 new_ptr =
1130 allocator.root()->Realloc(ptr, base_size + kSmallestBucket, type_name);
1131 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1132
1133 // Change the size of the realloc, switching buckets.
1134 new_ptr = allocator.root()->Realloc(ptr, base_size + kSmallestBucket + 1,
1135 type_name);
1136 PA_EXPECT_PTR_NE(new_ptr, ptr);
1137 // Check that the realloc copied correctly.
1138 char* new_char_ptr = static_cast<char*>(new_ptr);
1139 EXPECT_EQ(*new_char_ptr, 'A');
1140 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1141 // Subtle: this checks for an old bug where we copied too much from the
1142 // source of the realloc. The condition can be detected by a trashing of
1143 // the uninitialized value in the space of the upsized allocation.
1144 EXPECT_EQ(kUninitializedByte,
1145 static_cast<unsigned char>(*(new_char_ptr + kSmallestBucket)));
1146 #endif
1147 *new_char_ptr = 'B';
1148 // The realloc moved. To check that the old allocation was freed, we can
1149 // do an alloc of the old allocation size and check that the old allocation
1150 // address is at the head of the freelist and reused.
1151 void* reused_ptr = allocator.root()->Alloc(base_size + 1, type_name);
1152 PA_EXPECT_PTR_EQ(reused_ptr, orig_ptr);
1153 allocator.root()->Free(reused_ptr);
1154
1155 // Downsize the realloc.
1156 ptr = new_ptr;
1157 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1158 PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
1159 new_char_ptr = static_cast<char*>(new_ptr);
1160 EXPECT_EQ(*new_char_ptr, 'B');
1161 *new_char_ptr = 'C';
1162
1163 // Upsize the realloc to outside the partition.
1164 ptr = new_ptr;
1165 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed + 1, type_name);
1166 PA_EXPECT_PTR_NE(new_ptr, ptr);
1167 new_char_ptr = static_cast<char*>(new_ptr);
1168 EXPECT_EQ(*new_char_ptr, 'C');
1169 *new_char_ptr = 'D';
1170
1171 // Upsize and downsize the realloc, remaining outside the partition.
1172 ptr = new_ptr;
1173 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 10, type_name);
1174 new_char_ptr = static_cast<char*>(new_ptr);
1175 EXPECT_EQ(*new_char_ptr, 'D');
1176 *new_char_ptr = 'E';
1177 ptr = new_ptr;
1178 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 2, type_name);
1179 new_char_ptr = static_cast<char*>(new_ptr);
1180 EXPECT_EQ(*new_char_ptr, 'E');
1181 *new_char_ptr = 'F';
1182
1183 // Downsize the realloc to inside the partition.
1184 ptr = new_ptr;
1185 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1186 PA_EXPECT_PTR_NE(new_ptr, ptr);
1187 PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
1188 new_char_ptr = static_cast<char*>(new_ptr);
1189 EXPECT_EQ(*new_char_ptr, 'F');
1190
1191 allocator.root()->Free(new_ptr);
1192 }
1193
1194 // Test the generic allocation functions can handle some specific sizes of
1195 // interest.
TEST_P(PartitionAllocTest, AllocSizes) {
1197 {
1198 void* ptr = allocator.root()->Alloc(0, type_name);
1199 EXPECT_TRUE(ptr);
1200 allocator.root()->Free(ptr);
1201 }
1202
1203 {
1204 // PartitionPageSize() is interesting because it results in just one
1205 // allocation per page, which tripped up some corner cases.
1206 const size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
1207 void* ptr = allocator.root()->Alloc(size, type_name);
1208 EXPECT_TRUE(ptr);
1209 void* ptr2 = allocator.root()->Alloc(size, type_name);
1210 EXPECT_TRUE(ptr2);
1211 allocator.root()->Free(ptr);
1212 // Should be freeable at this point.
1213 auto* slot_span =
1214 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1215 EXPECT_TRUE(slot_span->in_empty_cache());
1216 allocator.root()->Free(ptr2);
1217 }
1218
1219 {
1220 // Single-slot slot span size.
1221 const size_t size =
1222 PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan + 1;
1223
1224 void* ptr = allocator.root()->Alloc(size, type_name);
1225 EXPECT_TRUE(ptr);
1226 memset(ptr, 'A', size);
1227 void* ptr2 = allocator.root()->Alloc(size, type_name);
1228 EXPECT_TRUE(ptr2);
1229 void* ptr3 = allocator.root()->Alloc(size, type_name);
1230 EXPECT_TRUE(ptr3);
1231 void* ptr4 = allocator.root()->Alloc(size, type_name);
1232 EXPECT_TRUE(ptr4);
1233
1234 auto* slot_span = SlotSpanMetadata::FromSlotStart(
1235 allocator.root()->ObjectToSlotStart(ptr));
1236 auto* slot_span2 =
1237 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr3));
1238 EXPECT_NE(slot_span, slot_span2);
1239
1240 allocator.root()->Free(ptr);
1241 allocator.root()->Free(ptr3);
1242 allocator.root()->Free(ptr2);
1243 // Should be freeable at this point.
1244 EXPECT_TRUE(slot_span->in_empty_cache());
1245 EXPECT_EQ(0u, slot_span->num_allocated_slots);
1246 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
1247 void* new_ptr_1 = allocator.root()->Alloc(size, type_name);
1248 PA_EXPECT_PTR_EQ(ptr2, new_ptr_1);
1249 void* new_ptr_2 = allocator.root()->Alloc(size, type_name);
1250 PA_EXPECT_PTR_EQ(ptr3, new_ptr_2);
1251
1252 allocator.root()->Free(new_ptr_1);
1253 allocator.root()->Free(new_ptr_2);
1254 allocator.root()->Free(ptr4);
1255
1256 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1257 // |SlotSpanMetadata::Free| must poison the slot's contents with
1258 // |kFreedByte|.
1259 EXPECT_EQ(kFreedByte,
1260 *(static_cast<unsigned char*>(new_ptr_1) + (size - 1)));
1261 #endif
1262 }
1263
1264 // Can we allocate a massive (128MB) size?
1265 // Add +1, to test for cookie writing alignment issues.
1266 // Test this only if the device has enough memory or it might fail due
1267 // to OOM.
1268 if (IsLargeMemoryDevice()) {
1269 void* ptr = allocator.root()->Alloc(128 * 1024 * 1024 + 1, type_name);
1270 allocator.root()->Free(ptr);
1271 }
1272
1273 {
1274 // Check a more reasonable, but still direct mapped, size.
1275 // Chop a system page and a byte off to test for rounding errors.
1276 size_t size = 20 * 1024 * 1024;
1277 ASSERT_GT(size, kMaxBucketed);
1278 size -= SystemPageSize();
1279 size -= 1;
1280 void* ptr = allocator.root()->Alloc(size, type_name);
1281 char* char_ptr = static_cast<char*>(ptr);
1282 *(char_ptr + (size - 1)) = 'A';
1283 allocator.root()->Free(ptr);
1284
1285 // Can we free null?
1286 allocator.root()->Free(nullptr);
1287
1288 // Do we correctly get a null for a failed allocation?
1289 EXPECT_EQ(nullptr, allocator.root()->Alloc<AllocFlags::kReturnNull>(
1290 3u * 1024 * 1024 * 1024, type_name));
1291 }
1292 }
1293
1294 // Test that we can fetch the real allocated size after an allocation.
TEST_P(PartitionAllocTest, AllocGetSizeAndStart) {
1296 void* ptr;
1297 size_t requested_size, actual_capacity, predicted_capacity;
1298
1299 // Allocate something small.
1300 requested_size = 511 - ExtraAllocSize(allocator);
1301 predicted_capacity =
1302 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1303 ptr = allocator.root()->Alloc(requested_size, type_name);
1304 EXPECT_TRUE(ptr);
1305 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1306 actual_capacity =
1307 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1308 EXPECT_EQ(predicted_capacity, actual_capacity);
1309 EXPECT_LT(requested_size, actual_capacity);
1310 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1311 if (UseBRPPool()) {
1312 uintptr_t address = UntagPtr(ptr);
1313 for (size_t offset = 0; offset < requested_size; ++offset) {
1314 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1315 slot_start);
1316 }
1317 }
1318 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1319 allocator.root()->Free(ptr);
1320
1321 // Allocate a size that should be a perfect match for a bucket, because it
1322 // is an exact power of 2.
1323 requested_size = (256 * 1024) - ExtraAllocSize(allocator);
1324 predicted_capacity =
1325 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1326 ptr = allocator.root()->Alloc(requested_size, type_name);
1327 EXPECT_TRUE(ptr);
1328 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1329 actual_capacity =
1330 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1331 EXPECT_EQ(predicted_capacity, actual_capacity);
1332 EXPECT_EQ(requested_size, actual_capacity);
1333 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1334 if (UseBRPPool()) {
1335 uintptr_t address = UntagPtr(ptr);
1336 for (size_t offset = 0; offset < requested_size; offset += 877) {
1337 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1338 slot_start);
1339 }
1340 }
1341 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1342 allocator.root()->Free(ptr);
1343
1344 // Allocate a size that is a system page smaller than a bucket.
1345 // AllocationCapacityFromSlotStart() should return a larger size than we asked
1346 // for now.
1347 size_t num = 64;
1348 while (num * SystemPageSize() >= 1024 * 1024) {
1349 num /= 2;
1350 }
1351 requested_size =
1352 num * SystemPageSize() - SystemPageSize() - ExtraAllocSize(allocator);
1353 predicted_capacity =
1354 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1355 ptr = allocator.root()->Alloc(requested_size, type_name);
1356 EXPECT_TRUE(ptr);
1357 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1358 actual_capacity =
1359 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1360 EXPECT_EQ(predicted_capacity, actual_capacity);
1361 EXPECT_EQ(requested_size + SystemPageSize(), actual_capacity);
1362 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1363 if (UseBRPPool()) {
1364 uintptr_t address = UntagPtr(ptr);
1365 for (size_t offset = 0; offset < requested_size; offset += 4999) {
1366 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1367 slot_start);
1368 }
1369 }
1370 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1371 allocator.root()->Free(ptr);
1372
1373 // Allocate the maximum allowed bucketed size.
1374 requested_size = kMaxBucketed - ExtraAllocSize(allocator);
1375 predicted_capacity =
1376 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1377 ptr = allocator.root()->Alloc(requested_size, type_name);
1378 EXPECT_TRUE(ptr);
1379 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1380 actual_capacity =
1381 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1382 EXPECT_EQ(predicted_capacity, actual_capacity);
1383 EXPECT_EQ(requested_size, actual_capacity);
1384 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1385 if (UseBRPPool()) {
1386 uintptr_t address = UntagPtr(ptr);
1387 for (size_t offset = 0; offset < requested_size; offset += 4999) {
1388 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1389 slot_start);
1390 }
1391 }
1392 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1393
1394 // Check that we can write at the end of the reported size too.
1395 char* char_ptr = static_cast<char*>(ptr);
1396 *(char_ptr + (actual_capacity - 1)) = 'A';
1397 allocator.root()->Free(ptr);
1398
1399 // Allocate something very large, and uneven.
1400 if (IsLargeMemoryDevice()) {
1401 requested_size = 128 * 1024 * 1024 - 33;
1402 predicted_capacity =
1403 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1404 ptr = allocator.root()->Alloc(requested_size, type_name);
1405 EXPECT_TRUE(ptr);
1406 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1407 actual_capacity =
1408 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1409 EXPECT_EQ(predicted_capacity, actual_capacity);
1410
1411 EXPECT_LT(requested_size, actual_capacity);
1412
1413 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1414 if (UseBRPPool()) {
1415 uintptr_t address = UntagPtr(ptr);
1416 for (size_t offset = 0; offset < requested_size; offset += 16111) {
1417 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1418 slot_start);
1419 }
1420 }
1421 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1422 allocator.root()->Free(ptr);
1423 }
1424
1425 // Too large allocation.
1426 requested_size = MaxDirectMapped() + 1;
1427 predicted_capacity =
1428 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1429 EXPECT_EQ(requested_size, predicted_capacity);
1430 }
1431
1432 #if PA_CONFIG(HAS_MEMORY_TAGGING)
1433 TEST_P(PartitionAllocTest, MTEProtectsFreedPtr) {
1434 // This test checks that Arm's memory tagging extension (MTE) is correctly
1435 // protecting freed pointers.
1436 base::CPU cpu;
1437 if (!cpu.has_mte()) {
1438 // This test won't pass without MTE support.
1439 GTEST_SKIP();
1440 }
1441
1442 // Create an arbitrarily-sized small allocation.
1443 size_t alloc_size = 64 - ExtraAllocSize(allocator);
1444 uint64_t* ptr1 =
1445 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1446 EXPECT_TRUE(ptr1);
1447
1448 // Invalidate the pointer by freeing it.
1449 allocator.root()->Free(ptr1);
1450
1451 // When we immediately reallocate a pointer, we should see the same allocation
1452 // slot but with a different tag (PA_EXPECT_PTR_EQ ignores the MTE tag).
1453 uint64_t* ptr2 =
1454 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1455 PA_EXPECT_PTR_EQ(ptr1, ptr2);
1456 // The different tag bits mean that ptr1 is not the same as ptr2.
1457 EXPECT_NE(ptr1, ptr2);
1458
1459 // When we free again, we expect a new tag for that area that's different from
1460 // ptr1 and ptr2.
1461 allocator.root()->Free(ptr2);
1462 uint64_t* ptr3 =
1463 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1464 PA_EXPECT_PTR_EQ(ptr2, ptr3);
1465 EXPECT_NE(ptr1, ptr3);
1466 EXPECT_NE(ptr2, ptr3);
1467
1468 // We don't check anything about ptr3, but we do clean it up to avoid DCHECKs.
1469 allocator.root()->Free(ptr3);
1470 }
1471 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
1472
1473 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1474 TEST_P(PartitionAllocTest, IsPtrWithinSameAlloc) {
1475 if (!UseBRPPool()) {
1476 return;
1477 }
1478
1479 const size_t kMinReasonableTestSize =
1480 partition_alloc::internal::base::bits::AlignUp(
1481 ExtraAllocSize(allocator) + 1, kAlignment);
1482 ASSERT_GT(kMinReasonableTestSize, ExtraAllocSize(allocator));
1483 const size_t kSizes[] = {kMinReasonableTestSize,
1484 256,
1485 SystemPageSize(),
1486 PartitionPageSize(),
1487 MaxRegularSlotSpanSize(),
1488 MaxRegularSlotSpanSize() + 1,
1489 MaxRegularSlotSpanSize() + SystemPageSize(),
1490 MaxRegularSlotSpanSize() + PartitionPageSize(),
1491 kMaxBucketed,
1492 kMaxBucketed + 1,
1493 kMaxBucketed + SystemPageSize(),
1494 kMaxBucketed + PartitionPageSize(),
1495 kSuperPageSize};
1496 #if BUILDFLAG(HAS_64_BIT_POINTERS)
1497 constexpr size_t kFarFarAwayDelta = 512 * kGiB;
1498 #else
1499 constexpr size_t kFarFarAwayDelta = kGiB;
1500 #endif
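  // kFarFarAwayDelta is picked so that an address offset by it can never fall
  // within the same allocation; the checks below expect kFarOOB for such
  // pointers.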
1501 for (size_t size : kSizes) {
1502 size_t requested_size = size - ExtraAllocSize(allocator);
1503 // For regular slot-span allocations, confirm the size fills the entire
1504 // slot. Otherwise the test would be ineffective, as Partition Alloc has no
1505 // ability to check against the actual allocated size.
1506 // Single-slot slot-spans and direct map don't have that problem.
1507 if (size <= MaxRegularSlotSpanSize()) {
1508 ASSERT_EQ(requested_size,
1509 allocator.root()->AllocationCapacityFromRequestedSize(
1510 requested_size));
1511 }
1512
1513 constexpr size_t kNumRepeats = 3;
1514 void* ptrs[kNumRepeats];
1515 for (void*& ptr : ptrs) {
1516 ptr = allocator.root()->Alloc(requested_size, type_name);
1517 // Double check.
1518 if (size <= MaxRegularSlotSpanSize()) {
1519 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1520 EXPECT_EQ(
1521 requested_size,
1522 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1523 }
1524
1525 uintptr_t address = UntagPtr(ptr);
1526 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kFarFarAwayDelta, 0u),
1527 PtrPosWithinAlloc::kFarOOB);
1528 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kSuperPageSize, 0u),
1529 PtrPosWithinAlloc::kFarOOB);
1530 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - 1, 0u),
1531 PtrPosWithinAlloc::kFarOOB);
1532 EXPECT_EQ(IsPtrWithinSameAlloc(address, address, 0u),
1533 PtrPosWithinAlloc::kInBounds);
1534 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size / 2, 0u),
1535 PtrPosWithinAlloc::kInBounds);
1536 #if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1537 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 1, 1u),
1538 PtrPosWithinAlloc::kInBounds);
1539 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 1u),
1540 PtrPosWithinAlloc::kAllocEnd);
1541 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 4, 4u),
1542 PtrPosWithinAlloc::kInBounds);
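      // With OOB-pointer poisoning, the one-past-the-end position and the
      // bytes within one |type_size| (4 here) before it are reported as
      // kAllocEnd rather than kInBounds.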
1543 for (size_t subtrahend = 0; subtrahend < 4; subtrahend++) {
1544 EXPECT_EQ(IsPtrWithinSameAlloc(
1545 address, address + requested_size - subtrahend, 4u),
1546 PtrPosWithinAlloc::kAllocEnd);
1547 }
1548 #else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1549 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 0u),
1550 PtrPosWithinAlloc::kInBounds);
1551 #endif
1552 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size + 1, 0u),
1553 PtrPosWithinAlloc::kFarOOB);
1554 EXPECT_EQ(IsPtrWithinSameAlloc(
1555 address, address + requested_size + kSuperPageSize, 0u),
1556 PtrPosWithinAlloc::kFarOOB);
1557 EXPECT_EQ(IsPtrWithinSameAlloc(
1558 address, address + requested_size + kFarFarAwayDelta, 0u),
1559 PtrPosWithinAlloc::kFarOOB);
1560 EXPECT_EQ(
1561 IsPtrWithinSameAlloc(address + requested_size,
1562 address + requested_size + kFarFarAwayDelta, 0u),
1563 PtrPosWithinAlloc::kFarOOB);
1564 EXPECT_EQ(
1565 IsPtrWithinSameAlloc(address + requested_size,
1566 address + requested_size + kSuperPageSize, 0u),
1567 PtrPosWithinAlloc::kFarOOB);
1568 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1569 address + requested_size + 1, 0u),
1570 PtrPosWithinAlloc::kFarOOB);
1571 #if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1572 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
1573 address + requested_size - 1, 1u),
1574 PtrPosWithinAlloc::kInBounds);
1575 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
1576 address + requested_size, 1u),
1577 PtrPosWithinAlloc::kAllocEnd);
1578 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1579 address + requested_size, 1u),
1580 PtrPosWithinAlloc::kAllocEnd);
1581 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 4,
1582 address + requested_size - 4, 4u),
1583 PtrPosWithinAlloc::kInBounds);
1584 for (size_t addend = 1; addend < 4; addend++) {
1585 EXPECT_EQ(
1586 IsPtrWithinSameAlloc(address + requested_size - 4,
1587 address + requested_size - 4 + addend, 4u),
1588 PtrPosWithinAlloc::kAllocEnd);
1589 }
1590 #else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1591 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1592 address + requested_size, 0u),
1593 PtrPosWithinAlloc::kInBounds);
1594 #endif
1595 EXPECT_EQ(IsPtrWithinSameAlloc(
1596 address + requested_size,
1597 address + requested_size - (requested_size / 2), 0u),
1598 PtrPosWithinAlloc::kInBounds);
1599 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address, 0u),
1600 PtrPosWithinAlloc::kInBounds);
1601 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address - 1, 0u),
1602 PtrPosWithinAlloc::kFarOOB);
1603 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1604 address - kSuperPageSize, 0u),
1605 PtrPosWithinAlloc::kFarOOB);
1606 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1607 address - kFarFarAwayDelta, 0u),
1608 PtrPosWithinAlloc::kFarOOB);
1609 }
1610
1611 for (void* ptr : ptrs) {
1612 allocator.root()->Free(ptr);
1613 }
1614 }
1615 }
1616
1617 TEST_P(PartitionAllocTest, GetSlotStartMultiplePages) {
1618 if (!UseBRPPool()) {
1619 return;
1620 }
1621
1622 auto* root = allocator.root();
1623 // Find the smallest bucket with multiple PartitionPages. When searching for
1624 // a bucket here, we need to check two conditions:
1625 // (1) The bucket is used in our current bucket distribution.
1626 // (2) The bucket is large enough that our requested size (see below) will be
1627 // non-zero.
1628 size_t real_size = 0;
1629 for (const auto& bucket : root->buckets) {
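    // Condition (1): skip buckets that SizeToIndex() doesn't map back to
    // themselves, i.e. buckets unused by the current bucket distribution.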
1630 if ((root->buckets + SizeToIndex(bucket.slot_size))->slot_size !=
1631 bucket.slot_size) {
1632 continue;
1633 }
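    // Condition (2): skip buckets too small to leave a non-zero requested size
    // once the per-slot overhead is subtracted.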
1634 if (bucket.slot_size <= ExtraAllocSize(allocator)) {
1635 continue;
1636 }
1637 if (bucket.num_system_pages_per_slot_span >
1638 NumSystemPagesPerPartitionPage()) {
1639 real_size = bucket.slot_size;
1640 break;
1641 }
1642 }
1643
1644 // Make sure that we've managed to find an appropriate bucket.
1645 ASSERT_GT(real_size, 0u);
1646
1647 const size_t requested_size = real_size - ExtraAllocSize(allocator);
1648   // Double-check we don't end up with a zero or wrapped-around size.
1649 EXPECT_GT(requested_size, 0u);
1650 EXPECT_LE(requested_size, real_size);
1651 const auto* bucket = allocator.root()->buckets + SizeToIndex(real_size);
1652 EXPECT_EQ(bucket->slot_size, real_size);
1653 // Make sure the test is testing multiple partition pages case.
1654 EXPECT_GT(bucket->num_system_pages_per_slot_span,
1655 PartitionPageSize() / SystemPageSize());
1656 size_t num_slots =
1657 (bucket->num_system_pages_per_slot_span * SystemPageSize()) / real_size;
1658 std::vector<void*> ptrs;
1659 for (size_t i = 0; i < num_slots; ++i) {
1660 ptrs.push_back(allocator.root()->Alloc(requested_size, type_name));
1661 }
1662 for (void* ptr : ptrs) {
1663 uintptr_t address = UntagPtr(ptr);
1664 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1665 EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start),
1666 requested_size);
1667 for (size_t offset = 0; offset < requested_size; offset += 13) {
1668 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1669 slot_start);
1670 }
1671 allocator.root()->Free(ptr);
1672 }
1673 }
1674 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1675
1676 // Test the realloc() contract.
1677 TEST_P(PartitionAllocTest, Realloc) {
1678 // realloc(0, size) should be equivalent to malloc().
1679 void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
1680 memset(ptr, 'A', kTestAllocSize);
1681 auto* slot_span =
1682 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1683 // realloc(ptr, 0) should be equivalent to free().
1684 void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
1685 EXPECT_EQ(nullptr, ptr2);
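  // The freed slot should now sit at the head of its slot span's freelist.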
1686 EXPECT_EQ(allocator.root()->ObjectToSlotStart(ptr),
1687 UntagPtr(slot_span->get_freelist_head()));
1688
1689 // Test that growing an allocation with realloc() copies everything from the
1690 // old allocation.
1691 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
1692 // Confirm size fills the entire slot.
1693 ASSERT_EQ(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
1694 ptr = allocator.root()->Alloc(size, type_name);
1695 memset(ptr, 'A', size);
1696 ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
1697 PA_EXPECT_PTR_NE(ptr, ptr2);
1698 char* char_ptr2 = static_cast<char*>(ptr2);
1699 EXPECT_EQ('A', char_ptr2[0]);
1700 EXPECT_EQ('A', char_ptr2[size - 1]);
1701 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1702 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
1703 #endif
1704
1705 // Test that shrinking an allocation with realloc() also copies everything
1706 // from the old allocation. Use |size - 1| to test what happens to the extra
1707 // space before the cookie.
1708 ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
1709 PA_EXPECT_PTR_NE(ptr2, ptr);
1710 char* char_ptr = static_cast<char*>(ptr);
1711 EXPECT_EQ('A', char_ptr[0]);
1712 EXPECT_EQ('A', char_ptr[size - 2]);
1713 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1714 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
1715 #endif
1716
1717 allocator.root()->Free(ptr);
1718
1719 // Single-slot slot spans...
1720 // Test that growing an allocation with realloc() copies everything from the
1721 // old allocation.
1722 size = MaxRegularSlotSpanSize() + 1;
1723 ASSERT_LE(2 * size, kMaxBucketed); // should be in single-slot span range
1724 // Confirm size doesn't fill the entire slot.
1725 ASSERT_LT(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
1726 ptr = allocator.root()->Alloc(size, type_name);
1727 memset(ptr, 'A', size);
1728 ptr2 = allocator.root()->Realloc(ptr, size * 2, type_name);
1729 PA_EXPECT_PTR_NE(ptr, ptr2);
1730 char_ptr2 = static_cast<char*>(ptr2);
1731 EXPECT_EQ('A', char_ptr2[0]);
1732 EXPECT_EQ('A', char_ptr2[size - 1]);
1733 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1734 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
1735 #endif
1736 allocator.root()->Free(ptr2);
1737
1738 // Test that shrinking an allocation with realloc() also copies everything
1739 // from the old allocation.
1740 size = 2 * (MaxRegularSlotSpanSize() + 1);
1741 ASSERT_GT(size / 2, MaxRegularSlotSpanSize()); // in single-slot span range
1742 ptr = allocator.root()->Alloc(size, type_name);
1743 memset(ptr, 'A', size);
1744 ptr2 = allocator.root()->Realloc(ptr2, size / 2, type_name);
1745 PA_EXPECT_PTR_NE(ptr, ptr2);
1746 char_ptr2 = static_cast<char*>(ptr2);
1747 EXPECT_EQ('A', char_ptr2[0]);
1748 EXPECT_EQ('A', char_ptr2[size / 2 - 1]);
1749 #if BUILDFLAG(PA_DCHECK_IS_ON)
1750 // For single-slot slot spans, the cookie is always placed immediately after
1751 // the allocation.
1752 EXPECT_EQ(kCookieValue[0], static_cast<unsigned char>(char_ptr2[size / 2]));
1753 #endif
1754 allocator.root()->Free(ptr2);
1755
1756 // Test that shrinking a direct mapped allocation happens in-place.
1757 // Pick a large size so that Realloc doesn't think it's worthwhile to
1758 // downsize even if one less super page is used (due to high granularity on
1759 // 64-bit systems).
1760 size = 10 * kSuperPageSize + SystemPageSize() - 42;
1761 ASSERT_GT(size - 32 * SystemPageSize(), kMaxBucketed);
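  // The assert above guarantees that even after shrinking by 32 system pages
  // the size stays above kMaxBucketed, i.e. the allocation remains
  // direct-mapped.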
1762 ptr = allocator.root()->Alloc(size, type_name);
1763 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1764 size_t actual_capacity =
1765 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1766 ptr2 = allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
1767 uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1768 EXPECT_EQ(slot_start, slot_start2);
1769 EXPECT_EQ(actual_capacity - SystemPageSize(),
1770 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1771 void* ptr3 =
1772 allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(), type_name);
1773 uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1774 EXPECT_EQ(slot_start2, slot_start3);
1775 EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
1776 allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
1777
1778 // Test that a previously in-place shrunk direct mapped allocation can be
1779 // expanded up again up to its original size.
1780 ptr = allocator.root()->Realloc(ptr3, size, type_name);
1781 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1782 EXPECT_EQ(slot_start3, slot_start);
1783 EXPECT_EQ(actual_capacity,
1784 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1785
1786 // Test that the allocation can be expanded in place up to its capacity.
1787 ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
1788 slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1789 EXPECT_EQ(slot_start, slot_start2);
1790 EXPECT_EQ(actual_capacity,
1791 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1792
1793 // Test that a direct mapped allocation is performed not in-place when the
1794 // new size is small enough.
1795 ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
1796 slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1797 EXPECT_NE(slot_start, slot_start3);
1798
1799 allocator.root()->Free(ptr3);
1800 }
1801
1802 TEST_P(PartitionAllocTest, ReallocDirectMapAligned) {
1803 size_t alignments[] = {
1804 PartitionPageSize(),
1805 2 * PartitionPageSize(),
1806 kMaxSupportedAlignment / 2,
1807 kMaxSupportedAlignment,
1808 };
1809
1810 for (size_t alignment : alignments) {
1811 // Test that shrinking a direct mapped allocation happens in-place.
1812 // Pick a large size so that Realloc doesn't think it's worthwhile to
1813 // downsize even if one less super page is used (due to high granularity on
1814 // 64-bit systems), even if the alignment padding is taken out.
1815 size_t size = 10 * kSuperPageSize + SystemPageSize() - 42;
1816 ASSERT_GT(size, kMaxBucketed);
1817 void* ptr =
1818 allocator.root()->AllocInternalForTesting(size, alignment, type_name);
1819 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1820 size_t actual_capacity =
1821 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1822 void* ptr2 =
1823 allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
1824 uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1825 EXPECT_EQ(slot_start, slot_start2);
1826 EXPECT_EQ(actual_capacity - SystemPageSize(),
1827 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1828 void* ptr3 = allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(),
1829 type_name);
1830 uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1831 EXPECT_EQ(slot_start2, slot_start3);
1832 EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
1833 allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
1834
1835 // Test that a previously in-place shrunk direct mapped allocation can be
1836 // expanded up again up to its original size.
1837 ptr = allocator.root()->Realloc(ptr3, size, type_name);
1838 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1839 EXPECT_EQ(slot_start3, slot_start);
1840 EXPECT_EQ(actual_capacity,
1841 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1842
1843 // Test that the allocation can be expanded in place up to its capacity.
1844 ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
1845 slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1846 EXPECT_EQ(slot_start, slot_start2);
1847 EXPECT_EQ(actual_capacity,
1848 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1849
1850 // Test that a direct mapped allocation is performed not in-place when the
1851 // new size is small enough.
1852 ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
1853 slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1854 EXPECT_NE(slot_start2, slot_start3);
1855
1856 allocator.root()->Free(ptr3);
1857 }
1858 }
1859
1860 TEST_P(PartitionAllocTest, ReallocDirectMapAlignedRelocate) {
1861   // Pick a size such that the alignment will put it across the super page
1862   // boundary.
1863 size_t size = 2 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
1864 ASSERT_GT(size, kMaxBucketed);
1865 void* ptr = allocator.root()->AllocInternalForTesting(
1866 size, kMaxSupportedAlignment, type_name);
1867 // Reallocating with the same size will actually relocate, because without a
1868 // need for alignment we can downsize the reservation significantly.
1869 void* ptr2 = allocator.root()->Realloc(ptr, size, type_name);
1870 PA_EXPECT_PTR_NE(ptr, ptr2);
1871 allocator.root()->Free(ptr2);
1872
1873   // Again pick a size such that the alignment will put it across the super page
1874   // boundary, but this time make it so large that Realloc doesn't find it worth
1875   // shrinking.
1876 size = 10 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
1877 ASSERT_GT(size, kMaxBucketed);
1878 ptr = allocator.root()->AllocInternalForTesting(size, kMaxSupportedAlignment,
1879 type_name);
1880 ptr2 = allocator.root()->Realloc(ptr, size, type_name);
1881 EXPECT_EQ(ptr, ptr2);
1882 allocator.root()->Free(ptr2);
1883 }
1884
1885 // Tests the handing out of freelists for partial slot spans.
1886 TEST_P(PartitionAllocTest, PartialPageFreelists) {
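  // A size that, once the allocator's per-slot overhead is added back, fills
  // exactly one system page.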
1887 size_t big_size = SystemPageSize() - ExtraAllocSize(allocator);
1888 size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
1889 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
1890 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1891
1892 void* ptr = allocator.root()->Alloc(big_size, type_name);
1893 EXPECT_TRUE(ptr);
1894
1895 auto* slot_span =
1896 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1897 size_t total_slots =
1898 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1899 (big_size + ExtraAllocSize(allocator));
1900 EXPECT_EQ(4u, total_slots);
1901   // The freelist should be empty, because only one slot fits into the single
1902   // provisioned system page, and that slot is the one we just allocated; the
1903   // remaining slots are still unprovisioned.
1904 EXPECT_FALSE(slot_span->get_freelist_head());
1905 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1906 EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
1907
1908 void* ptr2 = allocator.root()->Alloc(big_size, type_name);
1909 EXPECT_TRUE(ptr2);
1910 EXPECT_FALSE(slot_span->get_freelist_head());
1911 EXPECT_EQ(2u, slot_span->num_allocated_slots);
1912 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
1913
1914 void* ptr3 = allocator.root()->Alloc(big_size, type_name);
1915 EXPECT_TRUE(ptr3);
1916 EXPECT_FALSE(slot_span->get_freelist_head());
1917 EXPECT_EQ(3u, slot_span->num_allocated_slots);
1918 EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
1919
1920 void* ptr4 = allocator.root()->Alloc(big_size, type_name);
1921 EXPECT_TRUE(ptr4);
1922 EXPECT_FALSE(slot_span->get_freelist_head());
1923 EXPECT_EQ(4u, slot_span->num_allocated_slots);
1924 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
1925
1926 void* ptr5 = allocator.root()->Alloc(big_size, type_name);
1927 EXPECT_TRUE(ptr5);
1928
1929 auto* slot_span2 =
1930 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr5));
1931 EXPECT_EQ(1u, slot_span2->num_allocated_slots);
1932
1933 // Churn things a little whilst there's a partial slot span freelist.
1934 allocator.root()->Free(ptr);
1935 ptr = allocator.root()->Alloc(big_size, type_name);
1936 void* ptr6 = allocator.root()->Alloc(big_size, type_name);
1937
1938 allocator.root()->Free(ptr);
1939 allocator.root()->Free(ptr2);
1940 allocator.root()->Free(ptr3);
1941 allocator.root()->Free(ptr4);
1942 allocator.root()->Free(ptr5);
1943 allocator.root()->Free(ptr6);
1944 EXPECT_TRUE(slot_span->in_empty_cache());
1945 EXPECT_TRUE(slot_span2->in_empty_cache());
1946 EXPECT_TRUE(slot_span2->get_freelist_head());
1947 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
1948
1949 // Size that's just above half a page.
1950 size_t non_dividing_size =
1951 SystemPageSize() / 2 + 1 - ExtraAllocSize(allocator);
1952 bucket_index = SizeToIndex(non_dividing_size + ExtraAllocSize(allocator));
1953 bucket = &allocator.root()->buckets[bucket_index];
1954 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1955
1956 ptr = allocator.root()->Alloc(non_dividing_size, type_name);
1957 EXPECT_TRUE(ptr);
1958
1959 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1960 total_slots =
1961 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1962 bucket->slot_size;
1963
1964 EXPECT_FALSE(slot_span->get_freelist_head());
1965 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1966 EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
1967
1968 ptr2 = allocator.root()->Alloc(non_dividing_size, type_name);
1969 EXPECT_TRUE(ptr2);
1970 EXPECT_TRUE(slot_span->get_freelist_head());
1971 EXPECT_EQ(2u, slot_span->num_allocated_slots);
1972   // 2 slots got provisioned: the first one fills the rest of the first (already
1973   // provisioned) page and exceeds it by just a tad, thus leading to provisioning
1974   // a new page, and the second one fully fits within that new page.
1975 EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
1976
1977 ptr3 = allocator.root()->Alloc(non_dividing_size, type_name);
1978 EXPECT_TRUE(ptr3);
1979 EXPECT_FALSE(slot_span->get_freelist_head());
1980 EXPECT_EQ(3u, slot_span->num_allocated_slots);
1981 EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
1982
1983 allocator.root()->Free(ptr);
1984 allocator.root()->Free(ptr2);
1985 allocator.root()->Free(ptr3);
1986 EXPECT_TRUE(slot_span->in_empty_cache());
1987 EXPECT_TRUE(slot_span2->get_freelist_head());
1988 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
1989
1990 // And test a couple of sizes that do not cross SystemPageSize() with a
1991 // single allocation.
1992 size_t medium_size = (SystemPageSize() / 2) - ExtraAllocSize(allocator);
1993 bucket_index = SizeToIndex(medium_size + ExtraAllocSize(allocator));
1994 bucket = &allocator.root()->buckets[bucket_index];
1995 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1996
1997 ptr = allocator.root()->Alloc(medium_size, type_name);
1998 EXPECT_TRUE(ptr);
1999 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2000 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2001 total_slots =
2002 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2003 (medium_size + ExtraAllocSize(allocator));
2004 size_t first_slot_span_slots =
2005 SystemPageSize() / (medium_size + ExtraAllocSize(allocator));
2006 EXPECT_EQ(2u, first_slot_span_slots);
2007 EXPECT_EQ(total_slots - first_slot_span_slots,
2008 slot_span->num_unprovisioned_slots);
2009
2010 allocator.root()->Free(ptr);
2011
2012 size_t small_size = (SystemPageSize() / 4) - ExtraAllocSize(allocator);
2013 bucket_index = SizeToIndex(small_size + ExtraAllocSize(allocator));
2014 bucket = &allocator.root()->buckets[bucket_index];
2015 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2016
2017 ptr = allocator.root()->Alloc(small_size, type_name);
2018 EXPECT_TRUE(ptr);
2019 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2020 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2021 total_slots =
2022 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2023 (small_size + ExtraAllocSize(allocator));
2024 first_slot_span_slots =
2025 SystemPageSize() / (small_size + ExtraAllocSize(allocator));
2026 EXPECT_EQ(total_slots - first_slot_span_slots,
2027 slot_span->num_unprovisioned_slots);
2028
2029 allocator.root()->Free(ptr);
2030 EXPECT_TRUE(slot_span->get_freelist_head());
2031 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2032
2033 ASSERT_LT(ExtraAllocSize(allocator), 64u);
2034 size_t very_small_size = (ExtraAllocSize(allocator) <= 32)
2035 ? (32 - ExtraAllocSize(allocator))
2036 : (64 - ExtraAllocSize(allocator));
2037 size_t very_small_adjusted_size =
2038 allocator.root()->AdjustSize0IfNeeded(very_small_size);
2039 bucket_index =
2040 SizeToIndex(very_small_adjusted_size + ExtraAllocSize(allocator));
2041 bucket = &allocator.root()->buckets[bucket_index];
2042 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2043
2044 ptr = allocator.root()->Alloc(very_small_size, type_name);
2045 EXPECT_TRUE(ptr);
2046 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2047 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2048 size_t very_small_actual_size = allocator.root()->GetUsableSize(ptr);
2049 total_slots =
2050 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2051 (very_small_actual_size + ExtraAllocSize(allocator));
2052 first_slot_span_slots =
2053 SystemPageSize() / (very_small_actual_size + ExtraAllocSize(allocator));
2054 EXPECT_EQ(total_slots - first_slot_span_slots,
2055 slot_span->num_unprovisioned_slots);
2056
2057 allocator.root()->Free(ptr);
2058 EXPECT_TRUE(slot_span->get_freelist_head());
2059 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2060
2061 // And try an allocation size (against the generic allocator) that is
2062 // larger than a system page.
2063 size_t page_and_a_half_size =
2064 (SystemPageSize() + (SystemPageSize() / 2)) - ExtraAllocSize(allocator);
2065 ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
2066 EXPECT_TRUE(ptr);
2067 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2068 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2069 // Only the first slot was provisioned, and that's the one that was just
2070 // allocated so the free list is empty.
2071 EXPECT_TRUE(!slot_span->get_freelist_head());
2072 total_slots =
2073 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2074 (page_and_a_half_size + ExtraAllocSize(allocator));
2075 EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
2076 ptr2 = allocator.root()->Alloc(page_and_a_half_size, type_name);
2077 EXPECT_TRUE(ptr);
2078 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2079 EXPECT_EQ(2u, slot_span->num_allocated_slots);
2080 // As above, only one slot was provisioned.
2081 EXPECT_TRUE(!slot_span->get_freelist_head());
2082 EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
2083 allocator.root()->Free(ptr);
2084 allocator.root()->Free(ptr2);
2085
2086   // And then make sure that an allocation of exactly the page size only faults
2087   // one page.
2087 size_t page_size = SystemPageSize() - ExtraAllocSize(allocator);
2088 ptr = allocator.root()->Alloc(page_size, type_name);
2089 EXPECT_TRUE(ptr);
2090 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2091 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2092 EXPECT_TRUE(slot_span->get_freelist_head());
2093 total_slots =
2094 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2095 (page_size + ExtraAllocSize(allocator));
2096 EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
2097 allocator.root()->Free(ptr);
2098 }
2099
2100 // Test some of the fragmentation-resistant properties of the allocator.
2101 TEST_P(PartitionAllocTest, SlotSpanRefilling) {
2102 PartitionRoot::Bucket* bucket =
2103 &allocator.root()->buckets[test_bucket_index_];
2104
2105 // Grab two full slot spans and a non-full slot span.
2106 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
2107 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
2108 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2109 EXPECT_TRUE(ptr);
2110 EXPECT_NE(slot_span1, bucket->active_slot_spans_head);
2111 EXPECT_NE(slot_span2, bucket->active_slot_spans_head);
2112 auto* slot_span =
2113 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2114 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2115
2116 // Work out a pointer into slot_span2 and free it; and then slot_span1 and
2117 // free it.
2118 void* ptr2 = allocator.root()->SlotStartToObject(
2119 SlotSpan::ToSlotSpanStart(slot_span1));
2120 allocator.root()->Free(ptr2);
2121 ptr2 = allocator.root()->SlotStartToObject(
2122 SlotSpan::ToSlotSpanStart(slot_span2));
2123 allocator.root()->Free(ptr2);
2124
2125 // If we perform two allocations from the same bucket now, we expect to
2126 // refill both the nearly full slot spans.
2127 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
2128 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
2129 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2130
2131 FreeFullSlotSpan(allocator.root(), slot_span2);
2132 FreeFullSlotSpan(allocator.root(), slot_span1);
2133 allocator.root()->Free(ptr);
2134 }
2135
2136 // Basic tests to ensure that allocations work for partial page buckets.
2137 TEST_P(PartitionAllocTest, PartialPages) {
2138 // Find a size that is backed by a partial partition page.
2139 size_t size = sizeof(void*);
2140 size_t bucket_index;
2141
2142 PartitionRoot::Bucket* bucket = nullptr;
2143 constexpr size_t kMaxSize = 4000u;
2144 while (size < kMaxSize) {
2145 bucket_index = SizeToIndex(size + ExtraAllocSize(allocator));
2146 bucket = &allocator.root()->buckets[bucket_index];
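    // If the slot span's system-page count isn't a whole number of partition
    // pages, this bucket is backed by a partial partition page.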
2147 if (bucket->num_system_pages_per_slot_span %
2148 NumSystemPagesPerPartitionPage()) {
2149 break;
2150 }
2151 size += sizeof(void*);
2152 }
2153 EXPECT_LT(size, kMaxSize);
2154
2155 auto* slot_span1 = GetFullSlotSpan(size);
2156 auto* slot_span2 = GetFullSlotSpan(size);
2157 FreeFullSlotSpan(allocator.root(), slot_span2);
2158 FreeFullSlotSpan(allocator.root(), slot_span1);
2159 }
2160
2161 // Test correct handling if our mapping collides with another.
2162 TEST_P(PartitionAllocTest, MappingCollision) {
2163 size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
2164   // The -2 is because the first and last partition pages in a super page are
2165   // guard pages. We also discount the partition pages used for the free slot bitmap.
2166 size_t num_slot_span_needed =
2167 (NumPartitionPagesPerSuperPage() - 2 -
2168 partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
2169 num_pages_per_slot_span;
2170 size_t num_partition_pages_needed =
2171 num_slot_span_needed * num_pages_per_slot_span;
2172
2173 auto first_super_page_pages =
2174 std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
2175 auto second_super_page_pages =
2176 std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
2177
2178 size_t i;
2179 for (i = 0; i < num_partition_pages_needed; ++i) {
2180 first_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
2181 }
2182
2183 uintptr_t slot_span_start =
2184 SlotSpan::ToSlotSpanStart(first_super_page_pages[0]);
2185 EXPECT_EQ(PartitionPageSize() +
2186 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
2187 slot_span_start & kSuperPageOffsetMask);
2188 uintptr_t super_page =
2189 slot_span_start - PartitionPageSize() -
2190 partition_alloc::internal::ReservedFreeSlotBitmapSize();
2191 // Map a single system page either side of the mapping for our allocations,
2192 // with the goal of tripping up alignment of the next mapping.
2193 uintptr_t map1 =
2194 AllocPages(super_page - PageAllocationGranularity(),
2195 PageAllocationGranularity(), PageAllocationGranularity(),
2196 PageAccessibilityConfiguration(
2197 PageAccessibilityConfiguration::kInaccessible),
2198 PageTag::kPartitionAlloc);
2199 EXPECT_TRUE(map1);
2200 uintptr_t map2 =
2201 AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
2202 PageAllocationGranularity(),
2203 PageAccessibilityConfiguration(
2204 PageAccessibilityConfiguration::kInaccessible),
2205 PageTag::kPartitionAlloc);
2206 EXPECT_TRUE(map2);
2207
2208 for (i = 0; i < num_partition_pages_needed; ++i) {
2209 second_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
2210 }
2211
2212 FreePages(map1, PageAllocationGranularity());
2213 FreePages(map2, PageAllocationGranularity());
2214
2215 super_page = SlotSpan::ToSlotSpanStart(second_super_page_pages[0]);
2216 EXPECT_EQ(PartitionPageSize() +
2217 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
2218 super_page & kSuperPageOffsetMask);
2219 super_page -= PartitionPageSize() +
2220 partition_alloc::internal::ReservedFreeSlotBitmapSize();
2221 // Map a single system page either side of the mapping for our allocations,
2222 // with the goal of tripping up alignment of the next mapping.
2223 map1 = AllocPages(super_page - PageAllocationGranularity(),
2224 PageAllocationGranularity(), PageAllocationGranularity(),
2225 PageAccessibilityConfiguration(
2226 PageAccessibilityConfiguration::kReadWriteTagged),
2227 PageTag::kPartitionAlloc);
2228 EXPECT_TRUE(map1);
2229 map2 = AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
2230 PageAllocationGranularity(),
2231 PageAccessibilityConfiguration(
2232 PageAccessibilityConfiguration::kReadWriteTagged),
2233 PageTag::kPartitionAlloc);
2234 EXPECT_TRUE(map2);
2235 EXPECT_TRUE(TrySetSystemPagesAccess(
2236 map1, PageAllocationGranularity(),
2237 PageAccessibilityConfiguration(
2238 PageAccessibilityConfiguration::kInaccessible)));
2239 EXPECT_TRUE(TrySetSystemPagesAccess(
2240 map2, PageAllocationGranularity(),
2241 PageAccessibilityConfiguration(
2242 PageAccessibilityConfiguration::kInaccessible)));
2243
2244 auto* slot_span_in_third_super_page = GetFullSlotSpan(kTestAllocSize);
2245 FreePages(map1, PageAllocationGranularity());
2246 FreePages(map2, PageAllocationGranularity());
2247
2248 EXPECT_EQ(0u, SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
2249 PartitionPageOffsetMask());
2250
2251 // And make sure we really did get a page in a new superpage.
2252 EXPECT_NE(
2253 SlotSpan::ToSlotSpanStart(first_super_page_pages[0]) & kSuperPageBaseMask,
2254 SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
2255 kSuperPageBaseMask);
2256 EXPECT_NE(SlotSpan::ToSlotSpanStart(second_super_page_pages[0]) &
2257 kSuperPageBaseMask,
2258 SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
2259 kSuperPageBaseMask);
2260
2261 FreeFullSlotSpan(allocator.root(), slot_span_in_third_super_page);
2262 for (i = 0; i < num_partition_pages_needed; ++i) {
2263 FreeFullSlotSpan(allocator.root(), first_super_page_pages[i]);
2264 FreeFullSlotSpan(allocator.root(), second_super_page_pages[i]);
2265 }
2266 }
2267
2268 // Tests that slot spans in the free slot span cache do get freed as
2269 // appropriate.
2270 TEST_P(PartitionAllocTest, FreeCache) {
2271 EXPECT_EQ(0U, allocator.root()->get_total_size_of_committed_pages());
2272
2273 size_t big_size = 1000 - ExtraAllocSize(allocator);
2274 size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
2275 PartitionBucket* bucket = &allocator.root()->buckets[bucket_index];
2276
2277 void* ptr = allocator.root()->Alloc(big_size, type_name);
2278 EXPECT_TRUE(ptr);
2279 auto* slot_span =
2280 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2281 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2282 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2283 // Lazy commit commits only needed pages.
2284 size_t expected_committed_size =
2285 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
2286 EXPECT_EQ(expected_committed_size,
2287 allocator.root()->get_total_size_of_committed_pages());
2288 allocator.root()->Free(ptr);
2289 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2290 EXPECT_TRUE(slot_span->in_empty_cache());
2291 EXPECT_TRUE(slot_span->get_freelist_head());
2292
2293 CycleFreeCache(kTestAllocSize);
2294
2295 // Flushing the cache should have really freed the unused slot spans.
2296 EXPECT_FALSE(slot_span->get_freelist_head());
2297 EXPECT_FALSE(slot_span->in_empty_cache());
2298 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2299 size_t num_system_pages_per_slot_span = allocator.root()
2300 ->buckets[test_bucket_index_]
2301 .num_system_pages_per_slot_span;
2302 size_t expected_size =
2303 kUseLazyCommit ? SystemPageSize()
2304 : num_system_pages_per_slot_span * SystemPageSize();
2305 EXPECT_EQ(expected_size,
2306 allocator.root()->get_total_size_of_committed_pages());
2307
2308   // Check that an allocation works OK whilst in this state (a freed slot span
2309   // as the active slot spans head).
2310 ptr = allocator.root()->Alloc(big_size, type_name);
2311 EXPECT_FALSE(bucket->empty_slot_spans_head);
2312 allocator.root()->Free(ptr);
2313
2314 // Also check that a slot span that is bouncing immediately between empty and
2315 // used does not get freed.
2316 for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
2317 ptr = allocator.root()->Alloc(big_size, type_name);
2318 EXPECT_TRUE(slot_span->get_freelist_head());
2319 allocator.root()->Free(ptr);
2320 EXPECT_TRUE(slot_span->get_freelist_head());
2321 }
2322 EXPECT_EQ(expected_committed_size,
2323 allocator.root()->get_total_size_of_committed_pages());
2324 }
2325
2326 // Tests for a bug we had with losing references to free slot spans.
2327 TEST_P(PartitionAllocTest, LostFreeSlotSpansBug) {
2328 size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
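  // With this size a slot span holds a single slot, so each allocation below
  // immediately fills its own span.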
2329
2330 void* ptr = allocator.root()->Alloc(size, type_name);
2331 EXPECT_TRUE(ptr);
2332 void* ptr2 = allocator.root()->Alloc(size, type_name);
2333 EXPECT_TRUE(ptr2);
2334
2335 SlotSpanMetadata* slot_span =
2336 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2337 SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
2338 allocator.root()->ObjectToSlotStart(ptr2));
2339 PartitionBucket* bucket = slot_span->bucket;
2340
2341 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2342 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2343 EXPECT_EQ(1u, slot_span2->num_allocated_slots);
2344 EXPECT_TRUE(slot_span->is_full());
2345 EXPECT_TRUE(slot_span2->is_full());
2346 // The first span was kicked out from the active list, but the second one
2347 // wasn't.
2348 EXPECT_TRUE(slot_span->marked_full);
2349 EXPECT_FALSE(slot_span2->marked_full);
2350
2351 allocator.root()->Free(ptr);
2352 allocator.root()->Free(ptr2);
2353
2354 EXPECT_TRUE(bucket->empty_slot_spans_head);
2355 EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
2356 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2357 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
2358 EXPECT_FALSE(slot_span->is_full());
2359   EXPECT_FALSE(slot_span2->is_full());
2360 EXPECT_FALSE(slot_span->marked_full);
2361 EXPECT_FALSE(slot_span2->marked_full);
2362 EXPECT_TRUE(slot_span->get_freelist_head());
2363 EXPECT_TRUE(slot_span2->get_freelist_head());
2364
2365 CycleFreeCache(kTestAllocSize);
2366
2367 EXPECT_FALSE(slot_span->get_freelist_head());
2368 EXPECT_FALSE(slot_span2->get_freelist_head());
2369
2370 EXPECT_TRUE(bucket->empty_slot_spans_head);
2371 EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
2372 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
2373 bucket->active_slot_spans_head);
2374
2375 // At this moment, we have two decommitted slot spans, on the empty list.
2376 ptr = allocator.root()->Alloc(size, type_name);
2377 EXPECT_TRUE(ptr);
2378 allocator.root()->Free(ptr);
2379
2380 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
2381 bucket->active_slot_spans_head);
2382 EXPECT_TRUE(bucket->empty_slot_spans_head);
2383 EXPECT_TRUE(bucket->decommitted_slot_spans_head);
2384
2385 CycleFreeCache(kTestAllocSize);
2386
2387 // We're now set up to trigger a historical bug by scanning over the active
2388 // slot spans list. The current code gets into a different state, but we'll
2389 // keep the test as being an interesting corner case.
2390 ptr = allocator.root()->Alloc(size, type_name);
2391 EXPECT_TRUE(ptr);
2392 allocator.root()->Free(ptr);
2393
2394 EXPECT_TRUE(bucket->is_valid());
2395 EXPECT_TRUE(bucket->empty_slot_spans_head);
2396 EXPECT_TRUE(bucket->decommitted_slot_spans_head);
2397 }
2398
2399 #if defined(PA_HAS_DEATH_TESTS)
2400
2401 // Unit tests that check that if an allocation fails in "return null" mode,
2402 // repeating it doesn't crash, and it still returns null. The tests need to
2403 // stress memory subsystem limits to do so, hence they try to allocate
2404 // 6 GB of memory, each with a different per-allocation block size.
2405 //
2406 // On 64-bit systems we need to restrict the address space to force allocation
2407 // failure, so these tests run only on POSIX systems that provide setrlimit(),
2408 // and use it to limit address space to 6GB.
2409 //
2410 // Disable these tests on Android because, due to the allocation-heavy behavior,
2411 // they tend to get OOM-killed rather than pass.
2412 //
2413 // Disable these tests on Windows, since they run slower and tend to time out,
2414 // causing flakes.
2415 #if !BUILDFLAG(IS_WIN) && \
2416 (!defined(ARCH_CPU_64_BITS) || \
2417 (BUILDFLAG(IS_POSIX) && \
2418 !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)))) || \
2419 BUILDFLAG(IS_FUCHSIA)
2420 #define MAYBE_RepeatedAllocReturnNullDirect RepeatedAllocReturnNullDirect
2421 #define MAYBE_RepeatedReallocReturnNullDirect RepeatedReallocReturnNullDirect
2422 #else
2423 #define MAYBE_RepeatedAllocReturnNullDirect \
2424 DISABLED_RepeatedAllocReturnNullDirect
2425 #define MAYBE_RepeatedReallocReturnNullDirect \
2426 DISABLED_RepeatedReallocReturnNullDirect
2427 #endif
2428
2429 // The following four tests wrap the function under test in an EXPECT_DEATH
2430 // statement, because they are non-hermetic. Specifically, they are
2431 // going to attempt to exhaust the allocatable memory, which leaves the
2432 // allocator in a bad global state.
2433 // Performing them as death tests causes them to be forked into their own
2434 // process, so they won't pollute other tests.
2435 //
2436 // These tests are *very* slow when BUILDFLAG(PA_DCHECK_IS_ON), because they
2437 // memset() many GiB of data (see crbug.com/1168168).
2438 // TODO(lizeb): make these tests faster.
2439 TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedAllocReturnNullDirect) {
2440 // A direct-mapped allocation size.
2441 size_t direct_map_size = 32 * 1024 * 1024;
2442 ASSERT_GT(direct_map_size, kMaxBucketed);
2443 EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionAlloc),
2444 "Passed DoReturnNullTest");
2445 }
2446
2447 // Repeating above test with Realloc
2448 TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedReallocReturnNullDirect) {
2449 size_t direct_map_size = 32 * 1024 * 1024;
2450 ASSERT_GT(direct_map_size, kMaxBucketed);
2451 EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionRealloc),
2452 "Passed DoReturnNullTest");
2453 }
2454
2455 // TODO(crbug.com/1348221) re-enable the tests below, once the allocator
2456 // actually returns nullptr for non direct-mapped allocations.
2457 // When doing so, they will need to be made MAYBE_ like those above.
2458 //
2459 // Tests "return null" with a 512 kB block size.
2460 TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedAllocReturnNull) {
2461 // A single-slot but non-direct-mapped allocation size.
2462 size_t single_slot_size = 512 * 1024;
2463 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2464 ASSERT_LE(single_slot_size, kMaxBucketed);
2465 EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionAlloc),
2466 "Passed DoReturnNullTest");
2467 }
2468
2469 // Repeating above test with Realloc.
2470 TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedReallocReturnNull) {
2471 size_t single_slot_size = 512 * 1024;
2472 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2473 ASSERT_LE(single_slot_size, kMaxBucketed);
2474 EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionRealloc),
2475 "Passed DoReturnNullTest");
2476 }
2477
2478 #if PA_CONFIG(HAS_MEMORY_TAGGING)
2479 // Check that Arm's memory tagging extension (MTE) is correctly protecting
2480 // freed pointers. Writes to a freed pointer should result in a crash.
2481 TEST_P(PartitionAllocDeathTest, MTEProtectsFreedPtr) {
2482 base::CPU cpu;
2483 if (!cpu.has_mte()) {
2484 // This test won't pass on systems without MTE.
2485 GTEST_SKIP();
2486 }
2487
2488 constexpr uint64_t kCookie = 0x1234567890ABCDEF;
2489 constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
2490
2491 // Make an arbitrary-sized small allocation.
2492 size_t alloc_size = 64 - ExtraAllocSize(allocator);
2493 uint64_t* ptr =
2494 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
2495 EXPECT_TRUE(ptr);
2496
2497 // Check that the allocation's writable.
2498 *ptr = kCookie;
2499
2500 // Invalidate ptr by freeing it.
2501 allocator.root()->Free(ptr);
2502
2503   // Writing to ptr after free() should crash.
2504 EXPECT_EXIT(
2505 {
2506 // Should be in synchronous MTE mode for running this test.
2507 *ptr = kQuarantined;
2508 },
2509 testing::KilledBySignal(SIGSEGV), "");
2510 }
2511 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
2512
2513 // Make sure that malloc(-1) dies.
2514 // In the past, we had an integer overflow that would alias malloc(-1) to
2515 // malloc(0), which is not good.
2516 TEST_P(PartitionAllocDeathTest, LargeAllocs) {
2517 // Largest alloc.
2518 EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
2519 // And the smallest allocation we expect to die.
2520 // TODO(bartekn): Separate into its own test, as it wouldn't run (same below).
2521 EXPECT_DEATH(allocator.root()->Alloc(MaxDirectMapped() + 1, type_name), "");
2522 }
2523
2524 // These tests don't work deterministically when BRP is enabled on certain
2525 // architectures. On Free(), BRP's ref-count gets overwritten by an encoded
2526 // freelist pointer. On little-endian 64-bit architectures, this happens to
2527 // always be an even number, which triggers BRP's own CHECK (sic!). On other
2528 // architectures, it's likely to be an odd number >1, which will fool BRP into
2529 // thinking the memory isn't freed and still referenced, thus making it
2530 // quarantine it and return early, before PA_CHECK(slot_start != freelist_head)
2531 // is reached.
2532 // TODO(bartekn): Enable in the BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) case.
2533 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
2534 (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
2535
2536 // Check that our immediate double-free detection works.
2537 TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree) {
2538 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2539 EXPECT_TRUE(ptr);
2540 allocator.root()->Free(ptr);
2541 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2542 }
2543
2544 // As above, but when this isn't the only slot in the span.
2545 TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree2ndSlot) {
2546 void* ptr0 = allocator.root()->Alloc(kTestAllocSize, type_name);
2547 EXPECT_TRUE(ptr0);
2548 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2549 EXPECT_TRUE(ptr);
2550 allocator.root()->Free(ptr);
2551 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2552 allocator.root()->Free(ptr0);
2553 }
2554
2555 // Check that our double-free detection based on |num_allocated_slots| not going
2556 // below 0 works.
2557 //
2558 // Unlike in the ImmediateDoubleFree test, we can't have a 2ndSlot version, as
2559 // this protection wouldn't work when there is another slot present in the
2560 // span. That other slot would prevent |num_allocated_slots| from going below 0.
2561 TEST_P(PartitionAllocDeathTest, NumAllocatedSlotsDoubleFree) {
2562 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2563 EXPECT_TRUE(ptr);
2564 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
2565 EXPECT_TRUE(ptr2);
2566 allocator.root()->Free(ptr);
2567 allocator.root()->Free(ptr2);
2568 // This is not an immediate double-free so our immediate detection won't
2569 // fire. However, it does take |num_allocated_slots| to -1, which is illegal
2570 // and should be trapped.
2571 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2572 }
2573
2574 #endif // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
2575 // (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
2576
2577 // Check that guard pages are present where expected.
2578 TEST_P(PartitionAllocDeathTest, DirectMapGuardPages) {
2579 const size_t kSizes[] = {
2580 kMaxBucketed + ExtraAllocSize(allocator) + 1,
2581 kMaxBucketed + SystemPageSize(), kMaxBucketed + PartitionPageSize(),
2582 partition_alloc::internal::base::bits::AlignUp(
2583 kMaxBucketed + kSuperPageSize, kSuperPageSize) -
2584 PartitionRoot::GetDirectMapMetadataAndGuardPagesSize()};
2585 for (size_t size : kSizes) {
2586 ASSERT_GT(size, kMaxBucketed);
2587 size -= ExtraAllocSize(allocator);
2588 EXPECT_GT(size, kMaxBucketed)
2589 << "allocation not large enough for direct allocation";
2590 void* ptr = allocator.root()->Alloc(size, type_name);
2591
2592 EXPECT_TRUE(ptr);
2593 char* char_ptr = static_cast<char*>(ptr) - kPointerOffset;
2594
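    // A write just before the slot start, or just past the page-aligned end of
    // the mapping, should land in a guard page and crash.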
2595 EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
2596 EXPECT_DEATH(*(char_ptr + partition_alloc::internal::base::bits::AlignUp(
2597 size, SystemPageSize())) = 'A',
2598 "");
2599
2600 allocator.root()->Free(ptr);
2601 }
2602 }
2603
2604 // These tests rely on precise layout. They handle cookie, not ref-count.
2605 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
2606 PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
2607
2608 TEST_P(PartitionAllocDeathTest, UseAfterFreeDetection) {
2609 base::CPU cpu;
2610 void* data = allocator.root()->Alloc(100);
2611 allocator.root()->Free(data);
2612
2613   // Use after free: no crash here, but the next allocation should crash,
2614   // since we corrupted the freelist.
2615 memset(data, 0x42, 100);
2616 EXPECT_DEATH(allocator.root()->Alloc(100), "");
2617 }
2618
2619 TEST_P(PartitionAllocDeathTest, FreelistCorruption) {
2620 base::CPU cpu;
2621 const size_t alloc_size = 2 * sizeof(void*);
2622 void** fake_freelist_entry =
2623 static_cast<void**>(allocator.root()->Alloc(alloc_size));
2624 fake_freelist_entry[0] = nullptr;
2625 fake_freelist_entry[1] = nullptr;
2626
2627 void** uaf_data = static_cast<void**>(allocator.root()->Alloc(alloc_size));
2628 allocator.root()->Free(uaf_data);
2629   // Try to confuse the allocator. This is still easy to circumvent willingly;
2630   // one would "just" need to set uaf_data[1] to ~uaf_data[0].
2631 void* previous_uaf_data = uaf_data[0];
2632 uaf_data[0] = fake_freelist_entry;
2633 EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
2634
2635 // Restore the freelist entry value, otherwise freelist corruption is detected
2636 // in TearDown(), crashing this process.
2637 uaf_data[0] = previous_uaf_data;
2638
2639 allocator.root()->Free(fake_freelist_entry);
2640 }
2641
2642 // With BUILDFLAG(PA_DCHECK_IS_ON), cookie already handles off-by-one detection.
2643 #if !BUILDFLAG(PA_DCHECK_IS_ON)
2644 TEST_P(PartitionAllocDeathTest, OffByOneDetection) {
2645 base::CPU cpu;
2646 const size_t alloc_size = 2 * sizeof(void*);
2647 char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size));
2648 if (cpu.has_mte()) {
2649 EXPECT_DEATH(array[alloc_size] = 'A', "");
2650 } else {
2651 char previous_value = array[alloc_size];
2652 // volatile is required to prevent the compiler from getting too clever and
2653 // eliding the out-of-bounds write. The root cause is that the PA_MALLOC_FN
2654 // annotation tells the compiler (among other things) that the returned
2655 // value cannot alias anything.
2656 *const_cast<volatile char*>(&array[alloc_size]) = 'A';
2657 // Crash at the next allocation. This assumes that we are touching a new,
2658 // non-randomized slot span, where the next slot to be handed over to the
2659 // application directly follows the current one.
2660 EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
2661
2662 // Restore integrity, otherwise the process will crash in TearDown().
2663 array[alloc_size] = previous_value;
2664 }
2665 }
2666
2667 TEST_P(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
2668 base::CPU cpu;
2669 const size_t alloc_size = 2 * sizeof(void*);
2670 void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size));
2671 char valid;
2672 if (cpu.has_mte()) {
2673 EXPECT_DEATH(array[2] = &valid, "");
2674 } else {
2675 void* previous_value = array[2];
2676 // As above, needs volatile to convince the compiler to perform the write.
2677 *const_cast<void* volatile*>(&array[2]) = &valid;
2678 // Crash at the next allocation. This assumes that we are touching a new,
2679 // non-randomized slot span, where the next slot to be handed over to the
2680 // application directly follows the current one.
2681 EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
2682 array[2] = previous_value;
2683 }
2684 }
2685 #endif // !BUILDFLAG(PA_DCHECK_IS_ON)
2686
2687 #endif // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
2688 // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
2689
2690 #endif  // defined(PA_HAS_DEATH_TESTS)
2691
2692 // Tests that |PartitionRoot::DumpStats| runs without crashing and returns
2693 // non-zero values when memory is allocated.
2694 TEST_P(PartitionAllocTest, DumpMemoryStats) {
2695 {
2696 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2697 MockPartitionStatsDumper mock_stats_dumper;
2698 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2699 &mock_stats_dumper);
2700 EXPECT_TRUE(mock_stats_dumper.IsMemoryAllocationRecorded());
2701 allocator.root()->Free(ptr);
2702 }
2703
2704 // This series of tests checks the active -> empty -> decommitted states.
2705 {
2706 {
2707 void* ptr =
2708 allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name);
2709 MockPartitionStatsDumper dumper;
2710 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2711 &dumper);
2712 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2713
2714 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2715 EXPECT_TRUE(stats);
2716 EXPECT_TRUE(stats->is_valid);
2717 EXPECT_EQ(2048u, stats->bucket_slot_size);
2718 EXPECT_EQ(2048u, stats->active_bytes);
2719 EXPECT_EQ(1u, stats->active_count);
2720 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2721 EXPECT_EQ(0u, stats->decommittable_bytes);
2722 EXPECT_EQ(0u, stats->discardable_bytes);
2723 EXPECT_EQ(0u, stats->num_full_slot_spans);
2724 EXPECT_EQ(1u, stats->num_active_slot_spans);
2725 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2726 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2727 allocator.root()->Free(ptr);
2728 }
2729
2730 {
2731 MockPartitionStatsDumper dumper;
2732 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2733 &dumper);
2734 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2735
2736 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2737 EXPECT_TRUE(stats);
2738 EXPECT_TRUE(stats->is_valid);
2739 EXPECT_EQ(2048u, stats->bucket_slot_size);
2740 EXPECT_EQ(0u, stats->active_bytes);
2741 EXPECT_EQ(0u, stats->active_count);
2742 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2743 EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
2744 EXPECT_EQ(0u, stats->discardable_bytes);
2745 EXPECT_EQ(0u, stats->num_full_slot_spans);
2746 EXPECT_EQ(0u, stats->num_active_slot_spans);
2747 EXPECT_EQ(1u, stats->num_empty_slot_spans);
2748 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2749 }
2750
2751 // TODO(crbug.com/722911): Commenting this out causes this test to fail when
2752 // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
2753 // when run with the others (--gtest_filter=PartitionAllocTest.*).
2754 CycleFreeCache(kTestAllocSize);
2755
2756 {
2757 MockPartitionStatsDumper dumper;
2758 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2759 &dumper);
2760 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2761
2762 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2763 EXPECT_TRUE(stats);
2764 EXPECT_TRUE(stats->is_valid);
2765 EXPECT_EQ(2048u, stats->bucket_slot_size);
2766 EXPECT_EQ(0u, stats->active_bytes);
2767 EXPECT_EQ(0u, stats->active_count);
2768 EXPECT_EQ(0u, stats->resident_bytes);
2769 EXPECT_EQ(0u, stats->decommittable_bytes);
2770 EXPECT_EQ(0u, stats->discardable_bytes);
2771 EXPECT_EQ(0u, stats->num_full_slot_spans);
2772 EXPECT_EQ(0u, stats->num_active_slot_spans);
2773 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2774 EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
2775 }
2776 }
2777
2778 // This test checks for correct empty slot span list accounting.
2779 {
2780 size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
2781 void* ptr1 = allocator.root()->Alloc(size, type_name);
2782 void* ptr2 = allocator.root()->Alloc(size, type_name);
2783 allocator.root()->Free(ptr1);
2784 allocator.root()->Free(ptr2);
2785
2786 CycleFreeCache(kTestAllocSize);
2787
2788 ptr1 = allocator.root()->Alloc(size, type_name);
2789
2790 {
2791 MockPartitionStatsDumper dumper;
2792 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2793 &dumper);
2794 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2795
2796 const PartitionBucketMemoryStats* stats =
2797 dumper.GetBucketStats(PartitionPageSize());
2798 EXPECT_TRUE(stats);
2799 EXPECT_TRUE(stats->is_valid);
2800 EXPECT_EQ(PartitionPageSize(), stats->bucket_slot_size);
2801 EXPECT_EQ(PartitionPageSize(), stats->active_bytes);
2802 EXPECT_EQ(1u, stats->active_count);
2803 EXPECT_EQ(PartitionPageSize(), stats->resident_bytes);
2804 EXPECT_EQ(0u, stats->decommittable_bytes);
2805 EXPECT_EQ(0u, stats->discardable_bytes);
2806 EXPECT_EQ(1u, stats->num_full_slot_spans);
2807 EXPECT_EQ(0u, stats->num_active_slot_spans);
2808 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2809 EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
2810 }
2811 allocator.root()->Free(ptr1);
2812 }
2813
2814 // This test checks for correct direct mapped accounting.
2815 {
2816 size_t size_smaller = kMaxBucketed + 1;
2817 size_t size_bigger = (kMaxBucketed * 2) + 1;
2818 size_t real_size_smaller =
2819 (size_smaller + SystemPageOffsetMask()) & SystemPageBaseMask();
2820 size_t real_size_bigger =
2821 (size_bigger + SystemPageOffsetMask()) & SystemPageBaseMask();
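// A minimal worked example of the rounding above (assuming 4 KiB system
// pages): a request of N bytes maps to ((N + 4095) & ~4095), i.e. N rounded
// up to the next multiple of the system page size, since direct maps are
// sized in whole system pages.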
2822 void* ptr = allocator.root()->Alloc(size_smaller, type_name);
2823 void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
2824
2825 {
2826 MockPartitionStatsDumper dumper;
2827 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2828 &dumper);
2829 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2830
2831 const PartitionBucketMemoryStats* stats =
2832 dumper.GetBucketStats(real_size_smaller);
2833 EXPECT_TRUE(stats);
2834 EXPECT_TRUE(stats->is_valid);
2835 EXPECT_TRUE(stats->is_direct_map);
2836 EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
2837 EXPECT_EQ(real_size_smaller, stats->active_bytes);
2838 EXPECT_EQ(1u, stats->active_count);
2839 EXPECT_EQ(real_size_smaller, stats->resident_bytes);
2840 EXPECT_EQ(0u, stats->decommittable_bytes);
2841 EXPECT_EQ(0u, stats->discardable_bytes);
2842 EXPECT_EQ(1u, stats->num_full_slot_spans);
2843 EXPECT_EQ(0u, stats->num_active_slot_spans);
2844 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2845 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2846
2847 stats = dumper.GetBucketStats(real_size_bigger);
2848 EXPECT_TRUE(stats);
2849 EXPECT_TRUE(stats->is_valid);
2850 EXPECT_TRUE(stats->is_direct_map);
2851 EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
2852 EXPECT_EQ(real_size_bigger, stats->active_bytes);
2853 EXPECT_EQ(1u, stats->active_count);
2854 EXPECT_EQ(real_size_bigger, stats->resident_bytes);
2855 EXPECT_EQ(0u, stats->decommittable_bytes);
2856 EXPECT_EQ(0u, stats->discardable_bytes);
2857 EXPECT_EQ(1u, stats->num_full_slot_spans);
2858 EXPECT_EQ(0u, stats->num_active_slot_spans);
2859 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2860 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2861 }
2862
2863 allocator.root()->Free(ptr2);
2864 allocator.root()->Free(ptr);
2865
2866 // Whilst we're here, allocate again and free with different ordering to
2867 // give a workout to our linked list code.
2868 ptr = allocator.root()->Alloc(size_smaller, type_name);
2869 ptr2 = allocator.root()->Alloc(size_bigger, type_name);
2870 allocator.root()->Free(ptr);
2871 allocator.root()->Free(ptr2);
2872 }
2873
2874 // This test checks large-but-not-quite-direct allocations.
2875 {
2876 const size_t requested_size = 16 * SystemPageSize();
2877 void* ptr = allocator.root()->Alloc(requested_size + 1, type_name);
2878
2879 {
2880 MockPartitionStatsDumper dumper;
2881 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2882 &dumper);
2883 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2884
2885 size_t slot_size = SizeToBucketSize(requested_size + 1);
2886 const PartitionBucketMemoryStats* stats =
2887 dumper.GetBucketStats(slot_size);
2888 ASSERT_TRUE(stats);
2889 EXPECT_TRUE(stats->is_valid);
2890 EXPECT_FALSE(stats->is_direct_map);
2891 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2892 EXPECT_EQ(requested_size + 1 + ExtraAllocSize(allocator),
2893 stats->active_bytes);
2894 EXPECT_EQ(1u, stats->active_count);
2895 EXPECT_EQ(slot_size, stats->resident_bytes);
2896 EXPECT_EQ(0u, stats->decommittable_bytes);
2897 EXPECT_EQ((slot_size - (requested_size + 1)) / SystemPageSize() *
2898 SystemPageSize(),
2899 stats->discardable_bytes);
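// Illustrative numbers only (actual bucket sizes depend on the build): if
// slot_size were 72 KiB and the slot held 64 KiB + 1 byte, the 8191-byte
// tail rounds down to one whole 4 KiB page of discardable memory.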
2900 EXPECT_EQ(1u, stats->num_full_slot_spans);
2901 EXPECT_EQ(0u, stats->num_active_slot_spans);
2902 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2903 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2904 }
2905
2906 allocator.root()->Free(ptr);
2907
2908 {
2909 MockPartitionStatsDumper dumper;
2910 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2911 &dumper);
2912 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2913
2914 size_t slot_size = SizeToBucketSize(requested_size + 1);
2915 const PartitionBucketMemoryStats* stats =
2916 dumper.GetBucketStats(slot_size);
2917 EXPECT_TRUE(stats);
2918 EXPECT_TRUE(stats->is_valid);
2919 EXPECT_FALSE(stats->is_direct_map);
2920 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2921 EXPECT_EQ(0u, stats->active_bytes);
2922 EXPECT_EQ(0u, stats->active_count);
2923 EXPECT_EQ(slot_size, stats->resident_bytes);
2924 EXPECT_EQ(slot_size, stats->decommittable_bytes);
2925 EXPECT_EQ(0u, stats->num_full_slot_spans);
2926 EXPECT_EQ(0u, stats->num_active_slot_spans);
2927 EXPECT_EQ(1u, stats->num_empty_slot_spans);
2928 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2929 }
2930
2931 void* ptr2 = allocator.root()->Alloc(requested_size + SystemPageSize() + 1,
2932 type_name);
2933 EXPECT_EQ(ptr, ptr2);
2934
2935 {
2936 MockPartitionStatsDumper dumper;
2937 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2938 &dumper);
2939 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2940
2941 size_t slot_size =
2942 SizeToBucketSize(requested_size + SystemPageSize() + 1);
2943 const PartitionBucketMemoryStats* stats =
2944 dumper.GetBucketStats(slot_size);
2945 EXPECT_TRUE(stats);
2946 EXPECT_TRUE(stats->is_valid);
2947 EXPECT_FALSE(stats->is_direct_map);
2948 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2949 EXPECT_EQ(
2950 requested_size + SystemPageSize() + 1 + ExtraAllocSize(allocator),
2951 stats->active_bytes);
2952 EXPECT_EQ(1u, stats->active_count);
2953 EXPECT_EQ(slot_size, stats->resident_bytes);
2954 EXPECT_EQ(0u, stats->decommittable_bytes);
2955 EXPECT_EQ((slot_size - (requested_size + SystemPageSize() + 1)) /
2956 SystemPageSize() * SystemPageSize(),
2957 stats->discardable_bytes);
2958 EXPECT_EQ(1u, stats->num_full_slot_spans);
2959 EXPECT_EQ(0u, stats->num_active_slot_spans);
2960 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2961 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2962 }
2963
2964 allocator.root()->Free(ptr2);
2965 }
2966 }
2967
2968 // Tests the API to purge freeable memory.
2969 TEST_P(PartitionAllocTest, Purge) {
2970 char* ptr = static_cast<char*>(
2971 allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name));
2972 allocator.root()->Free(ptr);
2973 {
2974 MockPartitionStatsDumper dumper;
2975 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2976 &dumper);
2977 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2978
2979 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2980 EXPECT_TRUE(stats);
2981 EXPECT_TRUE(stats->is_valid);
2982 EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
2983 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2984 }
2985 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
2986 {
2987 MockPartitionStatsDumper dumper;
2988 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2989 &dumper);
2990 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2991
2992 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2993 EXPECT_TRUE(stats);
2994 EXPECT_TRUE(stats->is_valid);
2995 EXPECT_EQ(0u, stats->decommittable_bytes);
2996 EXPECT_EQ(0u, stats->resident_bytes);
2997 }
2998 // Calling purge again here is a good way of testing we didn't mess up the
2999 // state of the free cache ring.
3000 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3001
3002 // A single-slot but non-direct-mapped allocation size.
3003 size_t single_slot_size = 512 * 1024;
3004 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
3005 ASSERT_LE(single_slot_size, kMaxBucketed);
3006 char* big_ptr =
3007 static_cast<char*>(allocator.root()->Alloc(single_slot_size, type_name));
3008 allocator.root()->Free(big_ptr);
3009 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3010
3011 CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
3012 CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
3013 }
3014
3015 // Tests that we prefer to allocate into a non-empty partition page over an
3016 // empty one. This is an important aspect of minimizing memory usage for some
3017 // allocation sizes, particularly larger ones.
3018 TEST_P(PartitionAllocTest, PreferActiveOverEmpty) {
3019 size_t size = (SystemPageSize() * 2) - ExtraAllocSize(allocator);
3020 // Allocate 3 full slot spans worth of 8192-byte allocations.
3021 // Each slot span for this size is 16384 bytes, or 1 partition page and 2
3022 // slots.
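// Sketch of the expected layout, assuming 4 KiB system pages: each slot span
// holds 2 slots, so ptr1/ptr2 share span 1, ptr3/ptr4 share span 2, and
// ptr5/ptr6 share span 3.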
3023 void* ptr1 = allocator.root()->Alloc(size, type_name);
3024 void* ptr2 = allocator.root()->Alloc(size, type_name);
3025 void* ptr3 = allocator.root()->Alloc(size, type_name);
3026 void* ptr4 = allocator.root()->Alloc(size, type_name);
3027 void* ptr5 = allocator.root()->Alloc(size, type_name);
3028 void* ptr6 = allocator.root()->Alloc(size, type_name);
3029
3030 SlotSpanMetadata* slot_span1 = SlotSpanMetadata::FromSlotStart(
3031 allocator.root()->ObjectToSlotStart(ptr1));
3032 SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
3033 allocator.root()->ObjectToSlotStart(ptr3));
3034 SlotSpanMetadata* slot_span3 = SlotSpanMetadata::FromSlotStart(
3035 allocator.root()->ObjectToSlotStart(ptr6));
3036 EXPECT_NE(slot_span1, slot_span2);
3037 EXPECT_NE(slot_span2, slot_span3);
3038 PartitionBucket* bucket = slot_span1->bucket;
3039 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
3040
3041 // Free up the 2nd slot in each slot span.
3042 // This leaves the active list containing 3 slot spans, each with 1 used and 1
3043 // free slot. The active slot span will be the one containing ptr1.
3044 allocator.root()->Free(ptr6);
3045 allocator.root()->Free(ptr4);
3046 allocator.root()->Free(ptr2);
3047 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
3048
3049 // Empty the middle slot span in the active list.
3050 allocator.root()->Free(ptr3);
3051 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
3052
3053 // Empty the first slot span in the active list -- also the current slot span.
3054 allocator.root()->Free(ptr1);
3055
3056 // A good choice here is to re-fill the third slot span since the first two
3057 // are empty. We used to fail to do that.
3058 void* ptr7 = allocator.root()->Alloc(size, type_name);
3059 PA_EXPECT_PTR_EQ(ptr6, ptr7);
3060 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
3061
3062 allocator.root()->Free(ptr5);
3063 allocator.root()->Free(ptr7);
3064 }
3065
3066 // Tests the API to purge discardable memory.
3067 TEST_P(PartitionAllocTest, PurgeDiscardableSecondPage) {
3068 // Free the second of two 4096 byte allocations and then purge.
3069 void* ptr1 = allocator.root()->Alloc(
3070 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3071 char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
3072 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3073 allocator.root()->Free(ptr2);
3074 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3075 allocator.root()->ObjectToSlotStart(ptr1));
3076 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
3077 {
3078 MockPartitionStatsDumper dumper;
3079 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3080 &dumper);
3081 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3082
3083 const PartitionBucketMemoryStats* stats =
3084 dumper.GetBucketStats(SystemPageSize());
3085 EXPECT_TRUE(stats);
3086 EXPECT_TRUE(stats->is_valid);
3087 EXPECT_EQ(0u, stats->decommittable_bytes);
3088 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3089 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3090 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3091 }
3092 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
3093 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3094 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
3095 EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
3096
3097 allocator.root()->Free(ptr1);
3098 }
3099
3100 TEST_P(PartitionAllocTest, PurgeDiscardableFirstPage) {
3101 // Free the first of two 4096 byte allocations and then purge.
3102 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3103 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3104 void* ptr2 = allocator.root()->Alloc(
3105 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3106 allocator.root()->Free(ptr1);
3107 {
3108 MockPartitionStatsDumper dumper;
3109 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3110 &dumper);
3111 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3112
3113 const PartitionBucketMemoryStats* stats =
3114 dumper.GetBucketStats(SystemPageSize());
3115 EXPECT_TRUE(stats);
3116 EXPECT_TRUE(stats->is_valid);
3117 EXPECT_EQ(0u, stats->decommittable_bytes);
3118 #if BUILDFLAG(IS_WIN)
3119 EXPECT_EQ(0u, stats->discardable_bytes);
3120 #else
3121 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3122 #endif
3123 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3124 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3125 }
3126 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3127 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3128 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
3129
3130 allocator.root()->Free(ptr2);
3131 }
3132
3133 TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAlloc) {
3134 const size_t requested_size = 2.5 * SystemPageSize();
3135 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3136 requested_size - ExtraAllocSize(allocator), type_name));
3137 void* ptr2 = allocator.root()->Alloc(
3138 requested_size - ExtraAllocSize(allocator), type_name);
3139 void* ptr3 = allocator.root()->Alloc(
3140 requested_size - ExtraAllocSize(allocator), type_name);
3141 void* ptr4 = allocator.root()->Alloc(
3142 requested_size - ExtraAllocSize(allocator), type_name);
3143 memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
3144 memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
3145 allocator.root()->Free(ptr1);
3146 allocator.root()->Free(ptr2);
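// Layout sketch (assuming 4 KiB system pages, so the slot size is 2.5
// pages): the four slots cover 10 system pages; pages 0-1 and 3-4 lie
// entirely within the two freed slots, while page 2 straddles both of them.
// The expectations below follow from that layout.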
3147 {
3148 MockPartitionStatsDumper dumper;
3149 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3150 &dumper);
3151 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3152
3153 const PartitionBucketMemoryStats* stats =
3154 dumper.GetBucketStats(requested_size);
3155 EXPECT_TRUE(stats);
3156 EXPECT_TRUE(stats->is_valid);
3157 EXPECT_EQ(0u, stats->decommittable_bytes);
3158 #if BUILDFLAG(IS_WIN)
3159 EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
3160 #else
3161 EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
3162 #endif
3163 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3164 EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
3165 }
3166 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3167 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3168 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3169 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3170 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
3171 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3172 // Except for Windows, the first page is discardable because the freelist
3173 // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
3174 // checks for Linux and ChromeOS, not for Windows.
3175 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
3176 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3177 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3178 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3179 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
3180
3181 allocator.root()->Free(ptr3);
3182 allocator.root()->Free(ptr4);
3183 }
3184
3185 TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAllocOnSlotBoundary) {
3186 const size_t requested_size = 2.5 * SystemPageSize();
3187 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3188 requested_size - ExtraAllocSize(allocator), type_name));
3189 void* ptr2 = allocator.root()->Alloc(
3190 requested_size - ExtraAllocSize(allocator), type_name);
3191 void* ptr3 = allocator.root()->Alloc(
3192 requested_size - ExtraAllocSize(allocator), type_name);
3193 void* ptr4 = allocator.root()->Alloc(
3194 requested_size - ExtraAllocSize(allocator), type_name);
3195 memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
3196 memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
3197 allocator.root()->Free(ptr2);
3198 allocator.root()->Free(ptr1);
3199 {
3200 MockPartitionStatsDumper dumper;
3201 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3202 &dumper);
3203 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3204
3205 const PartitionBucketMemoryStats* stats =
3206 dumper.GetBucketStats(requested_size);
3207 EXPECT_TRUE(stats);
3208 EXPECT_TRUE(stats->is_valid);
3209 EXPECT_EQ(0u, stats->decommittable_bytes);
3210 #if BUILDFLAG(IS_WIN)
3211 EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
3212 #else
3213 EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
3214 #endif
3215 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3216 EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
3217 }
3218 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3219 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3220 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3221 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3222 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
3223 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3224 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3225 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3226 // Except for Windows, the third page is discardable because the freelist
3227 // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
3228 // checks for Linux and ChromeOS, not for Windows.
3229 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
3230 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3231 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
3232
3233 allocator.root()->Free(ptr3);
3234 allocator.root()->Free(ptr4);
3235 }
3236
3237 TEST_P(PartitionAllocTest, PurgeDiscardableManyPages) {
3238 // On systems with large pages, use fewer pages because:
3239 // 1) There must be a bucket for kFirstAllocPages * SystemPageSize(), and
3240 // 2) On low-end systems, using too many large pages can OOM during the test.
3241 const bool kHasLargePages = SystemPageSize() > 4096;
3242 const size_t kFirstAllocPages = kHasLargePages ? 32 : 64;
3243 const size_t kSecondAllocPages = kHasLargePages ? 31 : 61;
3244
3245 // Detect case (1) from above.
3246 PA_DCHECK(kFirstAllocPages * SystemPageSize() < (1UL << kMaxBucketedOrder));
3247
3248 const size_t kDeltaPages = kFirstAllocPages - kSecondAllocPages;
3249
3250 {
3251 ScopedPageAllocation p(allocator, kFirstAllocPages);
3252 p.TouchAllPages();
3253 }
3254
3255 ScopedPageAllocation p(allocator, kSecondAllocPages);
3256
3257 MockPartitionStatsDumper dumper;
3258 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3259 &dumper);
3260 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3261
3262 const PartitionBucketMemoryStats* stats =
3263 dumper.GetBucketStats(kFirstAllocPages * SystemPageSize());
3264 EXPECT_TRUE(stats);
3265 EXPECT_TRUE(stats->is_valid);
3266 EXPECT_EQ(0u, stats->decommittable_bytes);
3267 EXPECT_EQ(kDeltaPages * SystemPageSize(), stats->discardable_bytes);
3268 EXPECT_EQ(kSecondAllocPages * SystemPageSize(), stats->active_bytes);
3269 EXPECT_EQ(kFirstAllocPages * SystemPageSize(), stats->resident_bytes);
3270
3271 for (size_t i = 0; i < kFirstAllocPages; i++) {
3272 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
3273 }
3274
3275 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3276
3277 for (size_t i = 0; i < kSecondAllocPages; i++) {
3278 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
3279 }
3280 for (size_t i = kSecondAllocPages; i < kFirstAllocPages; i++) {
3281 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), false);
3282 }
3283 }
3284
3285 TEST_P(PartitionAllocTest, PurgeDiscardableWithFreeListStraightening) {
3286 // This test exercises truncation of the provisioned slots in a trickier
3287 // case where the freelist is rewritten.
3288 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3289 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3290 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3291 void* ptr2 = allocator.root()->Alloc(
3292 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3293 void* ptr3 = allocator.root()->Alloc(
3294 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3295 void* ptr4 = allocator.root()->Alloc(
3296 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3297 ptr1[0] = 'A';
3298 ptr1[SystemPageSize()] = 'A';
3299 ptr1[SystemPageSize() * 2] = 'A';
3300 ptr1[SystemPageSize() * 3] = 'A';
3301 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3302 allocator.root()->ObjectToSlotStart(ptr1));
3303 allocator.root()->Free(ptr2);
3304 allocator.root()->Free(ptr4);
3305 allocator.root()->Free(ptr1);
3306 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
3307
3308 {
3309 MockPartitionStatsDumper dumper;
3310 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3311 &dumper);
3312 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3313
3314 const PartitionBucketMemoryStats* stats =
3315 dumper.GetBucketStats(SystemPageSize());
3316 EXPECT_TRUE(stats);
3317 EXPECT_TRUE(stats->is_valid);
3318 EXPECT_EQ(0u, stats->decommittable_bytes);
3319 #if BUILDFLAG(IS_WIN)
3320 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3321 #else
3322 EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
3323 #endif
3324 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3325 EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
3326 }
3327 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3328 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3329 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3330 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3331 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3332 EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
3333 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3334 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3335 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3336 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3337
3338 // Let's check we didn't brick the freelist.
3339 void* ptr1b = allocator.root()->Alloc(
3340 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3341 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3342 void* ptr2b = allocator.root()->Alloc(
3343 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3344 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3345 EXPECT_FALSE(slot_span->get_freelist_head()); // ptr4 was unprovisioned
3346 void* ptr4b = allocator.root()->Alloc(
3347 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3348 PA_EXPECT_PTR_EQ(ptr4, ptr4b);
3349 EXPECT_FALSE(slot_span->get_freelist_head());
3350
3351 // Free objects such that they're in this order on the list:
3352 // head -> ptr2 -> ptr3 -> ptr1
3353 // However, ptr4 is still unfreed preventing any unprovisioning.
3354 allocator.root()->Free(ptr1);
3355 allocator.root()->Free(ptr3);
3356 allocator.root()->Free(ptr2);
3357 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3358 // The test by default runs in
3359 // StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning mode, so the
3360 // freelist isn't reordered, and the allocations happen in LIFO order.
3361 ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3362 type_name);
3363 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3364 void* ptr3b = allocator.root()->Alloc(
3365 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3366 PA_EXPECT_PTR_EQ(ptr3, ptr3b);
3367 ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3368 type_name);
3369 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3370 EXPECT_FALSE(slot_span->get_freelist_head());
3371
3372 // Free objects such that they're in this order on the list:
3373 // head -> ptr2 -> ptr3 -> ptr1
3374 // However, ptr4 is still unfreed preventing any unprovisioning.
3375 allocator.root()->Free(ptr1);
3376 allocator.root()->Free(ptr3);
3377 allocator.root()->Free(ptr2);
3378 PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
3379 StraightenLargerSlotSpanFreeListsMode::kAlways);
3380 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3381 // In StraightenLargerSlotSpanFreeListsMode::kAlways mode, the freelist is
3382 // ordered from left to right.
3383 ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3384 type_name);
3385 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3386 ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3387 type_name);
3388 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3389 ptr3b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3390 type_name);
3391 PA_EXPECT_PTR_EQ(ptr3, ptr3b);
3392 EXPECT_FALSE(slot_span->get_freelist_head());
3393
3394 // Free objects such that they're in this order on the list:
3395 // head -> ptr2 -> ptr4 -> ptr1
3396 // ptr3 is still unfreed preventing unprovisioning of ptr1 and ptr2, but not
3397 // ptr4.
3398 allocator.root()->Free(ptr1);
3399 allocator.root()->Free(ptr4);
3400 allocator.root()->Free(ptr2);
3401 PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
3402 StraightenLargerSlotSpanFreeListsMode::kNever);
3403 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3404 // In StraightenLargerSlotSpanFreeListsMode::kNever mode, unprovisioned
3405 // entries will be removed from the freelist but the list won't be reordered.
3406 ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3407 type_name);
3408 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3409 ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3410 type_name);
3411 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3412 EXPECT_FALSE(slot_span->get_freelist_head());
3413 ptr4b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3414 type_name);
3415 PA_EXPECT_PTR_EQ(ptr4, ptr4b);
3416 EXPECT_FALSE(slot_span->get_freelist_head());
3417
3418 // Clean up.
3419 allocator.root()->Free(ptr1);
3420 allocator.root()->Free(ptr2);
3421 allocator.root()->Free(ptr3);
3422 allocator.root()->Free(ptr4);
3423 }
3424
3425 TEST_P(PartitionAllocTest, PurgeDiscardableDoubleTruncateFreeList) {
3426 // This test is similar to the previous one, but exercises a double truncation.
3427 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3428 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3429 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3430 void* ptr2 = allocator.root()->Alloc(
3431 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3432 void* ptr3 = allocator.root()->Alloc(
3433 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3434 void* ptr4 = allocator.root()->Alloc(
3435 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3436 ptr1[0] = 'A';
3437 ptr1[SystemPageSize()] = 'A';
3438 ptr1[SystemPageSize() * 2] = 'A';
3439 ptr1[SystemPageSize() * 3] = 'A';
3440 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3441 allocator.root()->ObjectToSlotStart(ptr1));
3442 allocator.root()->Free(ptr4);
3443 allocator.root()->Free(ptr3);
3444 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
3445
3446 {
3447 MockPartitionStatsDumper dumper;
3448 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3449 &dumper);
3450 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3451
3452 const PartitionBucketMemoryStats* stats =
3453 dumper.GetBucketStats(SystemPageSize());
3454 EXPECT_TRUE(stats);
3455 EXPECT_TRUE(stats->is_valid);
3456 EXPECT_EQ(0u, stats->decommittable_bytes);
3457 EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
3458 EXPECT_EQ(2 * SystemPageSize(), stats->active_bytes);
3459 EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
3460 }
3461 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3462 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3463 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3464 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3465 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3466 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
3467 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3468 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3469 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
3470 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3471
3472 EXPECT_FALSE(slot_span->get_freelist_head());
3473
3474 allocator.root()->Free(ptr1);
3475 allocator.root()->Free(ptr2);
3476 }
3477
3478 TEST_P(PartitionAllocTest, PurgeDiscardableSmallSlotsWithTruncate) {
3479 size_t requested_size = 0.5 * SystemPageSize();
3480 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3481 requested_size - ExtraAllocSize(allocator), type_name));
3482 void* ptr2 = allocator.root()->Alloc(
3483 requested_size - ExtraAllocSize(allocator), type_name);
3484 void* ptr3 = allocator.root()->Alloc(
3485 requested_size - ExtraAllocSize(allocator), type_name);
3486 void* ptr4 = allocator.root()->Alloc(
3487 requested_size - ExtraAllocSize(allocator), type_name);
3488 allocator.root()->Free(ptr3);
3489 allocator.root()->Free(ptr4);
3490 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3491 allocator.root()->ObjectToSlotStart(ptr1));
3492 EXPECT_EQ(4u, slot_span->num_unprovisioned_slots);
3493 {
3494 MockPartitionStatsDumper dumper;
3495 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3496 &dumper);
3497 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3498
3499 const PartitionBucketMemoryStats* stats =
3500 dumper.GetBucketStats(requested_size);
3501 EXPECT_TRUE(stats);
3502 EXPECT_TRUE(stats->is_valid);
3503 EXPECT_EQ(0u, stats->decommittable_bytes);
3504 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3505 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3506 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3507 }
3508 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3509 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3510 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3511 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3512 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3513 EXPECT_EQ(6u, slot_span->num_unprovisioned_slots);
3514
3515 allocator.root()->Free(ptr1);
3516 allocator.root()->Free(ptr2);
3517 }
3518
3519 TEST_P(PartitionAllocTest, ActiveListMaintenance) {
3520 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
3521 size_t real_size = size + ExtraAllocSize(allocator);
3522 size_t bucket_index =
3523 allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
3524 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
3525 ASSERT_EQ(bucket->slot_size, real_size);
3526 size_t slots_per_span = bucket->num_system_pages_per_slot_span;
3527
3528 // Make 10 full slot spans.
3529 constexpr int kSpans = 10;
3530 std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
3531 for (int span_index = 0; span_index < kSpans; span_index++) {
3532 for (size_t i = 0; i < slots_per_span; i++) {
3533 allocated_memory_spans[span_index].push_back(
3534 allocator.root()->Alloc(size));
3535 }
3536 }
3537
3538 // Free one entry in the middle span, creating a partial slot span.
3539 constexpr size_t kSpanIndex = 5;
3540 allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
3541 allocated_memory_spans[kSpanIndex].pop_back();
3542
3543 // Empty the last slot span.
3544 for (void* ptr : allocated_memory_spans[kSpans - 1]) {
3545 allocator.root()->Free(ptr);
3546 }
3547 allocated_memory_spans.pop_back();
3548
3549 // The active list now is:
3550 // Partial -> Empty -> Full -> Full -> ... -> Full
3551 bucket->MaintainActiveList();
3552
3553 // Only one entry in the active list.
3554 ASSERT_NE(bucket->active_slot_spans_head,
3555 SlotSpanMetadata::get_sentinel_slot_span());
3556 EXPECT_FALSE(bucket->active_slot_spans_head->next_slot_span);
3557
3558 // The empty list has 1 entry.
3559 ASSERT_NE(bucket->empty_slot_spans_head,
3560 SlotSpanMetadata::get_sentinel_slot_span());
3561 EXPECT_FALSE(bucket->empty_slot_spans_head->next_slot_span);
3562
3563 // The rest are full slot spans.
3564 EXPECT_EQ(8u, bucket->num_full_slot_spans);
3565
3566 // Free all memory.
3567 for (const auto& span : allocated_memory_spans) {
3568 for (void* ptr : span) {
3569 allocator.root()->Free(ptr);
3570 }
3571 }
3572 }
3573
3574 TEST_P(PartitionAllocTest, ReallocMovesCookie) {
3575 // Resize so as to be sure to hit a "resize in place" case, and ensure that
3576 // use of the entire result is compatible with the debug mode's cookie, even
3577 // when the bucket size is large enough to span more than one partition page
3578 // and we can track the "raw" size. See https://crbug.com/709271
3579 static const size_t kSize = MaxRegularSlotSpanSize();
3580 void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
3581 EXPECT_TRUE(ptr);
3582
3583 memset(ptr, 0xbd, kSize + 1);
3584 ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
3585 EXPECT_TRUE(ptr);
3586
3587 memset(ptr, 0xbd, kSize + 2);
3588 allocator.root()->Free(ptr);
3589 }
3590
3591 TEST_P(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
3592 // For crbug.com/781473
3593 static constexpr size_t kSize = 264;
3594 void* ptr = allocator.root()->Alloc(kSize, type_name);
3595 EXPECT_TRUE(ptr);
3596
3597 ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
3598 EXPECT_TRUE(ptr);
3599
3600 allocator.root()->Free(ptr);
3601 }
3602
3603 TEST_P(PartitionAllocTest, ZeroFill) {
3604 static constexpr size_t kAllZerosSentinel =
3605 std::numeric_limits<size_t>::max();
3606 for (size_t size : kTestSizes) {
3607 char* p = static_cast<char*>(
3608 allocator.root()->Alloc<AllocFlags::kZeroFill>(size));
3609 size_t non_zero_position = kAllZerosSentinel;
3610 for (size_t i = 0; i < size; ++i) {
3611 if (0 != p[i]) {
3612 non_zero_position = i;
3613 break;
3614 }
3615 }
3616 EXPECT_EQ(kAllZerosSentinel, non_zero_position)
3617 << "test allocation size: " << size;
3618 allocator.root()->Free(p);
3619 }
3620
3621 for (int i = 0; i < 10; ++i) {
3622 SCOPED_TRACE(i);
3623 AllocateRandomly<AllocFlags::kZeroFill>(allocator.root(), 250);
3624 }
3625 }
3626
3627 TEST_P(PartitionAllocTest, SchedulerLoopQuarantine) {
3628 SchedulerLoopQuarantineBranch& branch =
3629 allocator.root()->GetSchedulerLoopQuarantineBranchForTesting();
3630
3631 constexpr size_t kCapacityInBytes = std::numeric_limits<size_t>::max();
3632 size_t original_capacity_in_bytes = branch.GetRoot().GetCapacityInBytes();
3633 branch.GetRoot().SetCapacityInBytesForTesting(kCapacityInBytes);
3634
3635 for (size_t size : kTestSizes) {
3636 SCOPED_TRACE(size);
3637
3638 void* object = allocator.root()->Alloc(size);
3639 allocator.root()->Free<FreeFlags::kSchedulerLoopQuarantine>(object);
3640
3641 ASSERT_TRUE(branch.IsQuarantinedForTesting(object));
3642 }
3643
3644 for (int i = 0; i < 10; ++i) {
3645 SCOPED_TRACE(i);
3646 AllocateRandomly<AllocFlags::kNone, FreeFlags::kSchedulerLoopQuarantine>(
3647 allocator.root(), 250);
3648 }
3649
3650 branch.Purge();
3651 branch.GetRoot().SetCapacityInBytesForTesting(original_capacity_in_bytes);
3652 }
3653
3654 TEST_P(PartitionAllocTest, ZapOnFree) {
3655 void* ptr = allocator.root()->Alloc(1, type_name);
3656 EXPECT_TRUE(ptr);
3657 memset(ptr, 'A', 1);
3658 allocator.root()->Free<FreeFlags::kZap>(ptr);
3659 EXPECT_NE('A', *static_cast<unsigned char*>(ptr));
3660
3661 constexpr size_t size = 1024;
3662 ptr = allocator.root()->Alloc(size, type_name);
3663 EXPECT_TRUE(ptr);
3664 memset(ptr, 'A', size);
3665 allocator.root()->Free<FreeFlags::kZap>(ptr);
3666 EXPECT_NE('A', *static_cast<unsigned char*>(ptr));
3667 EXPECT_EQ(kFreedByte,
3668 *(static_cast<unsigned char*>(ptr) + 2 * sizeof(void*)));
3669 EXPECT_EQ(kFreedByte, *(static_cast<unsigned char*>(ptr) + size - 1));
3670 }
3671
3672 TEST_P(PartitionAllocTest, Bug_897585) {
3673 // Need sizes big enough to be direct mapped and a delta small enough to
3674 // allow re-use of the slot span when cookied. These numbers fall out of the
3675 // test case in the indicated bug.
3676 size_t kInitialSize = 983050;
3677 size_t kDesiredSize = 983100;
3678 ASSERT_GT(kInitialSize, kMaxBucketed);
3679 ASSERT_GT(kDesiredSize, kMaxBucketed);
3680 void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(kInitialSize);
3681 ASSERT_NE(nullptr, ptr);
3682 ptr = allocator.root()->Realloc<AllocFlags::kReturnNull>(ptr, kDesiredSize,
3683 nullptr);
3684 ASSERT_NE(nullptr, ptr);
3685 memset(ptr, 0xbd, kDesiredSize);
3686 allocator.root()->Free(ptr);
3687 }
3688
3689 TEST_P(PartitionAllocTest, OverrideHooks) {
3690 constexpr size_t kOverriddenSize = 1234;
3691 constexpr const char* kOverriddenType = "Overridden type";
3692 constexpr unsigned char kOverriddenChar = 'A';
3693
3694 // Marked static so that we can use them in non-capturing lambdas below.
3695 // (Non-capturing lambdas convert directly to function pointers.)
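// For instance, `bool (*fn)(int) = [](int x) { return x > 0; };` compiles,
// whereas a capturing lambda would not convert to a plain function pointer.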
3696 static volatile bool free_called = false;
3697 static void* overridden_allocation = nullptr;
3698 overridden_allocation = malloc(kOverriddenSize);
3699 memset(overridden_allocation, kOverriddenChar, kOverriddenSize);
3700
3701 PartitionAllocHooks::SetOverrideHooks(
3702 [](void** out, AllocFlags flags, size_t size,
3703 const char* type_name) -> bool {
3704 if (size == kOverriddenSize && type_name == kOverriddenType) {
3705 *out = overridden_allocation;
3706 return true;
3707 }
3708 return false;
3709 },
3710 [](void* address) -> bool {
3711 if (address == overridden_allocation) {
3712 free_called = true;
3713 return true;
3714 }
3715 return false;
3716 },
3717 [](size_t* out, void* address) -> bool {
3718 if (address == overridden_allocation) {
3719 *out = kOverriddenSize;
3720 return true;
3721 }
3722 return false;
3723 });
3724
3725 void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(kOverriddenSize,
3726 kOverriddenType);
3727 ASSERT_EQ(ptr, overridden_allocation);
3728
3729 allocator.root()->Free(ptr);
3730 EXPECT_TRUE(free_called);
3731
3732 // overridden_allocation has not actually been freed so we can now immediately
3733 // realloc it.
3734 free_called = false;
3735 ptr = allocator.root()->Realloc<AllocFlags::kReturnNull>(ptr, 1, nullptr);
3736 ASSERT_NE(ptr, nullptr);
3737 EXPECT_NE(ptr, overridden_allocation);
3738 EXPECT_TRUE(free_called);
3739 EXPECT_EQ(*(char*)ptr, kOverriddenChar);
3740 allocator.root()->Free(ptr);
3741
3742 PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
3743 free(overridden_allocation);
3744 }
3745
3746 TEST_P(PartitionAllocTest, Alignment) {
3747 std::vector<void*> allocated_ptrs;
3748
3749 for (size_t size = 1; size <= PartitionPageSize(); size <<= 1) {
3750 if (size <= ExtraAllocSize(allocator)) {
3751 continue;
3752 }
3753 size_t requested_size = size - ExtraAllocSize(allocator);
3754
3755 // All allocations which are not direct-mapped occupy contiguous slots of a
3756 // span, starting on a page boundary. This means that allocations are first
3757 // rounded up to the nearest bucket size, then have an address of the form:
3758 // (partition-page-aligned address) + i * bucket_size.
3759 //
3760 // All powers of two are bucket sizes, meaning that all power of two
3761 // allocations smaller than a page will be aligned on the allocation size.
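// For example (not exercised explicitly here): with size == 64, slots sit at
// base, base + 64, base + 128, ..., where base is partition-page aligned, so
// every slot start is 64-byte aligned.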
3762 size_t expected_alignment = size;
3763 for (int index = 0; index < 3; index++) {
3764 void* ptr = allocator.root()->Alloc(requested_size);
3765 allocated_ptrs.push_back(ptr);
3766 EXPECT_EQ(0u,
3767 allocator.root()->ObjectToSlotStart(ptr) % expected_alignment)
3768 << (index + 1) << "-th allocation of size=" << size;
3769 }
3770 }
3771
3772 for (void* ptr : allocated_ptrs) {
3773 allocator.root()->Free(ptr);
3774 }
3775 }
3776
3777 TEST_P(PartitionAllocTest, FundamentalAlignment) {
3778 // See the test above for details. Essentially, checking the bucket size is
3779 // sufficient to ensure that alignment will always be respected, as long as
3780 // the fundamental alignment is <= 16 bytes.
3781 size_t fundamental_alignment = kAlignment;
3782 for (size_t size = 0; size < SystemPageSize(); size++) {
3783 // Allocate several pointers, as the first one in use in a size class will
3784 // be aligned on a page boundary.
3785 void* ptr = allocator.root()->Alloc(size);
3786 void* ptr2 = allocator.root()->Alloc(size);
3787 void* ptr3 = allocator.root()->Alloc(size);
3788
3789 EXPECT_EQ(UntagPtr(ptr) % fundamental_alignment, 0u);
3790 EXPECT_EQ(UntagPtr(ptr2) % fundamental_alignment, 0u);
3791 EXPECT_EQ(UntagPtr(ptr3) % fundamental_alignment, 0u);
3792
3793 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
3794 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
3795 // The capacity(C) is slot size - ExtraAllocSize(allocator).
3796 // Since slot size is multiples of kAlignment,
3797 // C % kAlignment == (slot_size - ExtraAllocSize(allocator)) % kAlignment.
3798 // C % kAlignment == (-ExtraAllocSize(allocator)) % kAlignment.
3799 // Since kCookieSize is a multiple of kAlignment,
3800 // C % kAlignment == (-kInSlotRefCountBufferSize) % kAlignment
3801 // == (kAlignment - kInSlotRefCountBufferSize) % kAlignment.
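// A hypothetical numeric instance of the above: with kAlignment == 16 and a
// 4-byte in-slot ref-count, C % 16 == (-4) % 16 == 12.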
3802 EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
3803 fundamental_alignment,
3804 UseBRPPool()
3805 ? (-ExtraAllocSize(allocator) % fundamental_alignment)
3806 : 0);
3807 #else
3808 EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
3809 fundamental_alignment,
3810 -ExtraAllocSize(allocator) % fundamental_alignment);
3811 #endif
3812
3813 allocator.root()->Free(ptr);
3814 allocator.root()->Free(ptr2);
3815 allocator.root()->Free(ptr3);
3816 }
3817 }
3818
3819 void VerifyAlignment(PartitionRoot* root, size_t size, size_t alignment) {
3820 std::vector<void*> allocated_ptrs;
3821
3822 for (int index = 0; index < 3; index++) {
3823 void* ptr = root->AlignedAlloc(alignment, size);
3824 ASSERT_TRUE(ptr);
3825 allocated_ptrs.push_back(ptr);
3826 EXPECT_EQ(0ull, UntagPtr(ptr) % alignment)
3827 << (index + 1) << "-th allocation of size=" << size
3828 << ", alignment=" << alignment;
3829 }
3830
3831 for (void* ptr : allocated_ptrs) {
3832 root->Free(ptr);
3833 }
3834 }
3835
3836 TEST_P(PartitionAllocTest, AlignedAllocations) {
3837 size_t alloc_sizes[] = {1,
3838 10,
3839 100,
3840 1000,
3841 10000,
3842 60000,
3843 70000,
3844 130000,
3845 500000,
3846 900000,
3847 kMaxBucketed + 1,
3848 2 * kMaxBucketed,
3849 kSuperPageSize - 2 * PartitionPageSize(),
3850 4 * kMaxBucketed};
3851 for (size_t alloc_size : alloc_sizes) {
3852 for (size_t alignment = 1; alignment <= kMaxSupportedAlignment;
3853 alignment <<= 1) {
3854 VerifyAlignment(aligned_allocator.root(), alloc_size, alignment);
3855
3856 // Verify alignment on the regular allocator only when BRP is off, or when
3857 // it's on in the "previous slot" mode. See the comment in SetUp().
3858 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
3859 BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
3860 VerifyAlignment(allocator.root(), alloc_size, alignment);
3861 #endif
3862 }
3863 }
3864 }
3865
3866 // Test that the optimized `GetSlotNumber` implementation produces valid
3867 // results.
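// GetSlotNumber(offset) is expected to compute offset / slot_size
// (presumably via a precomputed reciprocal rather than a hardware division);
// the loop below checks it at the start, middle, and end of every slot.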
3868 TEST_P(PartitionAllocTest, OptimizedGetSlotNumber) {
3869 for (size_t i = 0; i < kNumBuckets; ++i) {
3870 auto& bucket = allocator.root()->buckets[i];
3871 if (SizeToIndex(bucket.slot_size) != i) {
3872 continue;
3873 }
3874 for (size_t slot = 0, offset = 0; slot < bucket.get_slots_per_span();
3875 ++slot, offset += bucket.slot_size) {
3876 EXPECT_EQ(slot, bucket.GetSlotNumber(offset));
3877 EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size / 2));
3878 EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size - 1));
3879 }
3880 }
3881 }
3882
3883 TEST_P(PartitionAllocTest, GetUsableSizeNull) {
3884 EXPECT_EQ(0ULL, PartitionRoot::GetUsableSize(nullptr));
3885 }
3886
3887 TEST_P(PartitionAllocTest, GetUsableSize) {
3888 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
3889 allocator.root()->EnableMac11MallocSizeHackForTesting(
3890 GetParam().ref_count_size);
3891 #endif
3892 size_t delta = 31;
3893 for (size_t size = 1; size <= kMinDirectMappedDownsize; size += delta) {
3894 void* ptr = allocator.root()->Alloc(size);
3895 EXPECT_TRUE(ptr);
3896 size_t usable_size = PartitionRoot::GetUsableSize(ptr);
3897 size_t usable_size_with_hack =
3898 PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
3899 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
3900 if (size != internal::kMac11MallocSizeHackRequestedSize)
3901 #endif
3902 EXPECT_EQ(usable_size_with_hack, usable_size);
3903 EXPECT_LE(size, usable_size);
3904 memset(ptr, 0xDE, usable_size);
3905 // Should not crash when freeing the ptr.
3906 allocator.root()->Free(ptr);
3907 }
3908 }
3909
3910 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
3911 TEST_P(PartitionAllocTest, GetUsableSizeWithMac11MallocSizeHack) {
3912 if (internal::base::mac::MacOSMajorVersion() != 11) {
3913 GTEST_SKIP() << "Skipping because the test is for Mac11.";
3914 }
3915
3916 allocator.root()->EnableMac11MallocSizeHackForTesting(
3917 GetParam().ref_count_size);
3918 size_t size = internal::kMac11MallocSizeHackRequestedSize;
3919 void* ptr = allocator.root()->Alloc(size);
3920 size_t usable_size = PartitionRoot::GetUsableSize(ptr);
3921 size_t usable_size_with_hack =
3922 PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
3923 EXPECT_EQ(usable_size,
3924 allocator.root()->settings.mac11_malloc_size_hack_usable_size_);
3925 EXPECT_EQ(usable_size_with_hack, size);
3926
3927 allocator.root()->Free(ptr);
3928 }
3929 #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
3930
3931 TEST_P(PartitionAllocTest, Bookkeeping) {
3932 auto& root = *allocator.root();
3933
3934 EXPECT_EQ(0U, root.total_size_of_committed_pages);
3935 EXPECT_EQ(0U, root.max_size_of_committed_pages);
3936 EXPECT_EQ(0U, root.get_total_size_of_allocated_bytes());
3937 EXPECT_EQ(0U, root.get_max_size_of_allocated_bytes());
3938 EXPECT_EQ(0U, root.total_size_of_super_pages);
3939 size_t small_size = 1000;
3940
3941 // A full slot span of size 1 partition page is committed.
3942 void* ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
3943 // Lazy commit commits only needed pages.
3944 size_t expected_committed_size =
3945 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
3946 size_t expected_super_pages_size = kSuperPageSize;
3947 size_t expected_max_committed_size = expected_committed_size;
3948 size_t bucket_index = SizeToIndex(small_size - ExtraAllocSize(allocator));
3949 PartitionBucket* bucket = &root.buckets[bucket_index];
3950 size_t expected_total_allocated_size = bucket->slot_size;
3951 size_t expected_max_allocated_size = expected_total_allocated_size;
3952
3953 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3954 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3955 EXPECT_EQ(expected_total_allocated_size,
3956 root.get_total_size_of_allocated_bytes());
3957 EXPECT_EQ(expected_max_allocated_size,
3958 root.get_max_size_of_allocated_bytes());
3959 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3960
3961 // Freeing memory doesn't result in decommitting pages right away.
3962 root.Free(ptr);
3963 expected_total_allocated_size = 0U;
3964 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3965 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3966 EXPECT_EQ(expected_total_allocated_size,
3967 root.get_total_size_of_allocated_bytes());
3968 EXPECT_EQ(expected_max_allocated_size,
3969 root.get_max_size_of_allocated_bytes());
3970 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3971
3972 // Allocating the same size lands it in the same slot span.
3973 ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
3974 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3975 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3976 EXPECT_EQ(expected_max_allocated_size,
3977 root.get_max_size_of_allocated_bytes());
3978 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3979
3980 // Freeing memory doesn't result in decommitting pages right away.
3981 root.Free(ptr);
3982 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3983 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3984 EXPECT_EQ(expected_max_allocated_size,
3985 root.get_max_size_of_allocated_bytes());
3986 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3987
3988 // Allocating another size commits another slot span.
3989 ptr = root.Alloc(2 * small_size - ExtraAllocSize(allocator), type_name);
3990 expected_committed_size +=
3991 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
3992 expected_max_committed_size =
3993 std::max(expected_max_committed_size, expected_committed_size);
3994 expected_max_allocated_size =
3995 std::max(expected_max_allocated_size, static_cast<size_t>(2048));
3996 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3997 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3998 EXPECT_EQ(expected_max_allocated_size,
3999 root.get_max_size_of_allocated_bytes());
4000 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4001
4002 // Freeing memory doesn't result in decommitting pages right away.
4003 root.Free(ptr);
4004 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4005 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4006 EXPECT_EQ(expected_max_allocated_size,
4007 root.get_max_size_of_allocated_bytes());
4008 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4009
4010 // Single-slot slot spans...
4011 //
4012 // When the system page size is larger than 4KiB, we don't necessarily have
4013 // enough space in the superpage to store two of the largest bucketed
4014 // allocations, particularly when we reserve extra space for e.g. bitmaps.
4015 // To avoid this, we use something just below kMaxBucketed.
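  // As a rough illustration (hypothetical numbers, the real constants vary by
  // configuration): with kMaxBucketed = 1 MiB and SystemPageSize() = 4 KiB,
  // big_size would be 1 MiB * 4 / 5 - 4 KiB, i.e. roughly 815 KiB.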
4016 size_t big_size = kMaxBucketed * 4 / 5 - SystemPageSize();
4017
4018 ASSERT_GT(big_size, MaxRegularSlotSpanSize());
4019 ASSERT_LE(big_size, kMaxBucketed);
4020 bucket_index = SizeToIndex(big_size - ExtraAllocSize(allocator));
4021 bucket = &root.buckets[bucket_index];
4022   // Assert the allocation doesn't fill the entire span nor the entire partition
4023 // page, to make the test more interesting.
4024 ASSERT_LT(big_size, bucket->get_bytes_per_span());
4025 ASSERT_NE(big_size % PartitionPageSize(), 0U);
4026 ptr = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
4027 expected_committed_size += bucket->get_bytes_per_span();
4028 expected_max_committed_size =
4029 std::max(expected_max_committed_size, expected_committed_size);
4030 expected_total_allocated_size += bucket->get_bytes_per_span();
4031 expected_max_allocated_size =
4032 std::max(expected_max_allocated_size, expected_total_allocated_size);
4033 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4034 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4035 EXPECT_EQ(expected_total_allocated_size,
4036 root.get_total_size_of_allocated_bytes());
4037 EXPECT_EQ(expected_max_allocated_size,
4038 root.get_max_size_of_allocated_bytes());
4039 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4040
4041   // Allocating a 2nd time doesn't overflow the super page...
4042 void* ptr2 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
4043 expected_committed_size += bucket->get_bytes_per_span();
4044 expected_max_committed_size =
4045 std::max(expected_max_committed_size, expected_committed_size);
4046 expected_total_allocated_size += bucket->get_bytes_per_span();
4047 expected_max_allocated_size =
4048 std::max(expected_max_allocated_size, expected_total_allocated_size);
4049 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4050 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4051 EXPECT_EQ(expected_total_allocated_size,
4052 root.get_total_size_of_allocated_bytes());
4053 EXPECT_EQ(expected_max_allocated_size,
4054 root.get_max_size_of_allocated_bytes());
4055 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4056
4057   // ... but a 3rd time does.
4058 void* ptr3 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
4059 expected_committed_size += bucket->get_bytes_per_span();
4060 expected_max_committed_size =
4061 std::max(expected_max_committed_size, expected_committed_size);
4062 expected_total_allocated_size += bucket->get_bytes_per_span();
4063 expected_max_allocated_size =
4064 std::max(expected_max_allocated_size, expected_total_allocated_size);
4065 expected_super_pages_size += kSuperPageSize;
4066 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4067 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4068 EXPECT_EQ(expected_total_allocated_size,
4069 root.get_total_size_of_allocated_bytes());
4070 EXPECT_EQ(expected_max_allocated_size,
4071 root.get_max_size_of_allocated_bytes());
4072 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4073
4074 // Freeing memory doesn't result in decommitting pages right away.
4075 root.Free(ptr);
4076 root.Free(ptr2);
4077 root.Free(ptr3);
4078 expected_total_allocated_size -= 3 * bucket->get_bytes_per_span();
4079 expected_max_allocated_size =
4080 std::max(expected_max_allocated_size, expected_total_allocated_size);
4081 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4082 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4083 EXPECT_EQ(expected_total_allocated_size,
4084 root.get_total_size_of_allocated_bytes());
4085 EXPECT_EQ(expected_max_allocated_size,
4086 root.get_max_size_of_allocated_bytes());
4087 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4088
4089 // Now everything should be decommitted. The reserved space for super pages
4090 // stays the same and will never go away (by design).
4091 root.PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
4092 expected_committed_size = 0;
4093 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4094 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4095 EXPECT_EQ(expected_total_allocated_size,
4096 root.get_total_size_of_allocated_bytes());
4097 EXPECT_EQ(expected_max_allocated_size,
4098 root.get_max_size_of_allocated_bytes());
4099 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4100
4101 // None of the above should affect the direct map space.
4102 EXPECT_EQ(0U, root.total_size_of_direct_mapped_pages);
4103
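  // Direct-map sizes chosen to bracket interesting boundaries: just above
  // kMaxBucketed, just below / exactly at / just above a super page, and
  // around the direct-map allocation granularity.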
4104 size_t huge_sizes[] = {
4105 kMaxBucketed + SystemPageSize(),
4106 kMaxBucketed + SystemPageSize() + 123,
4107 kSuperPageSize - PageAllocationGranularity(),
4108 kSuperPageSize - SystemPageSize() - PartitionPageSize(),
4109 kSuperPageSize - PartitionPageSize(),
4110 kSuperPageSize - SystemPageSize(),
4111 kSuperPageSize,
4112 kSuperPageSize + SystemPageSize(),
4113 kSuperPageSize + PartitionPageSize(),
4114 kSuperPageSize + SystemPageSize() + PartitionPageSize(),
4115 kSuperPageSize + PageAllocationGranularity(),
4116 kSuperPageSize + DirectMapAllocationGranularity(),
4117 };
4118 size_t alignments[] = {
4119 PartitionPageSize(),
4120 2 * PartitionPageSize(),
4121 kMaxSupportedAlignment / 2,
4122 kMaxSupportedAlignment,
4123 };
4124 for (size_t huge_size : huge_sizes) {
4125 ASSERT_GT(huge_size, kMaxBucketed);
4126 for (size_t alignment : alignments) {
4127 // For direct map, we commit only as many pages as needed.
4128 size_t aligned_size = partition_alloc::internal::base::bits::AlignUp(
4129 huge_size, SystemPageSize());
4130 ptr = root.AllocInternalForTesting(huge_size - ExtraAllocSize(allocator),
4131 alignment, type_name);
4132 expected_committed_size += aligned_size;
4133 expected_max_committed_size =
4134 std::max(expected_max_committed_size, expected_committed_size);
4135 expected_total_allocated_size += aligned_size;
4136 expected_max_allocated_size =
4137 std::max(expected_max_allocated_size, expected_total_allocated_size);
4138     // The total reserved map includes metadata and guard pages at the ends,
4139     // as well as alignment padding. However, that would double-count the first
4140     // partition page, so it needs to be subtracted.
4141 size_t surrounding_pages_size =
4142 PartitionRoot::GetDirectMapMetadataAndGuardPagesSize() + alignment -
4143 PartitionPageSize();
4144 size_t expected_direct_map_size =
4145 partition_alloc::internal::base::bits::AlignUp(
4146 aligned_size + surrounding_pages_size,
4147 DirectMapAllocationGranularity());
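    // In other words, a direct-map reservation is expected to be the
    // allocation rounded up to system pages, plus metadata/guard pages and
    // alignment padding, all rounded up to the direct-map granularity.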
4148 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4149 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4150 EXPECT_EQ(expected_total_allocated_size,
4151 root.get_total_size_of_allocated_bytes());
4152 EXPECT_EQ(expected_max_allocated_size,
4153 root.get_max_size_of_allocated_bytes());
4154 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4155 EXPECT_EQ(expected_direct_map_size,
4156 root.total_size_of_direct_mapped_pages);
4157
4158     // Freeing memory in the direct map decommits pages right away. The address
4159 // space is released for re-use too.
4160 root.Free(ptr);
4161 expected_committed_size -= aligned_size;
4162 expected_direct_map_size = 0;
4163 expected_max_committed_size =
4164 std::max(expected_max_committed_size, expected_committed_size);
4165 expected_total_allocated_size -= aligned_size;
4166 expected_max_allocated_size =
4167 std::max(expected_max_allocated_size, expected_total_allocated_size);
4168 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4169 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4170 EXPECT_EQ(expected_total_allocated_size,
4171 root.get_total_size_of_allocated_bytes());
4172 EXPECT_EQ(expected_max_allocated_size,
4173 root.get_max_size_of_allocated_bytes());
4174 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4175 EXPECT_EQ(expected_direct_map_size,
4176 root.total_size_of_direct_mapped_pages);
4177 }
4178 }
4179 }
4180
4181 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
4182
4183 TEST_P(PartitionAllocTest, RefCountBasic) {
4184 if (!UseBRPPool()) {
4185 return;
4186 }
4187
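  // Note: 0xEF matches the quarantine zap pattern (kQuarantinedByte), which a
  // freed-but-still-referenced slot is expected to be filled with.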
4188 constexpr uint64_t kCookie = 0x1234567890ABCDEF;
4189 constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
4190
4191 size_t alloc_size = 64 - ExtraAllocSize(allocator);
4192 uint64_t* ptr1 =
4193 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4194 EXPECT_TRUE(ptr1);
4195
4196 *ptr1 = kCookie;
4197
4198 auto* ref_count =
4199 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
4200 EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
4201
4202 ref_count->Acquire();
4203 EXPECT_FALSE(ref_count->Release());
4204 EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
4205 EXPECT_EQ(*ptr1, kCookie);
4206
4207 ref_count->AcquireFromUnprotectedPtr();
4208 EXPECT_FALSE(ref_count->IsAliveWithNoKnownRefs());
4209
4210 allocator.root()->Free(ptr1);
4211 // The allocation shouldn't be reclaimed, and its contents should be zapped.
4212 // Retag ptr1 to get its correct MTE tag.
4213 ptr1 = TagPtr(ptr1);
4214 EXPECT_NE(*ptr1, kCookie);
4215 EXPECT_EQ(*ptr1, kQuarantined);
4216
4217 // The allocator should not reuse the original slot since its reference count
4218 // doesn't equal zero.
4219 uint64_t* ptr2 =
4220 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4221 EXPECT_NE(ptr1, ptr2);
4222 allocator.root()->Free(ptr2);
4223
4224 // When the last reference is released, the slot should become reusable.
4225 // Retag ref_count because PartitionAlloc retags ptr to enforce quarantine.
4226 ref_count = TagPtr(ref_count);
4227 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4228 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
4229 uint64_t* ptr3 =
4230 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4231 EXPECT_EQ(ptr1, ptr3);
4232 allocator.root()->Free(ptr3);
4233 }
4234
4235 void PartitionAllocTest::RunRefCountReallocSubtest(size_t orig_size,
4236 size_t new_size) {
4237 void* ptr1 = allocator.root()->Alloc(orig_size, type_name);
4238 EXPECT_TRUE(ptr1);
4239
4240 auto* ref_count1 =
4241 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
4242 EXPECT_TRUE(ref_count1->IsAliveWithNoKnownRefs());
4243
4244 ref_count1->AcquireFromUnprotectedPtr();
4245 EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
4246
4247 void* ptr2 = allocator.root()->Realloc(ptr1, new_size, type_name);
4248 EXPECT_TRUE(ptr2);
4249
4250 // PartitionAlloc may retag memory areas on realloc (even if they
4251 // do not move), so recover the true tag here.
4252 ref_count1 = TagPtr(ref_count1);
4253
4254 // Re-query ref-count. It may have moved if Realloc changed the slot.
4255 auto* ref_count2 =
4256 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr2));
4257
4258 if (UntagPtr(ptr1) == UntagPtr(ptr2)) {
4259 // If the slot didn't change, ref-count should stay the same.
4260 EXPECT_EQ(ref_count1, ref_count2);
4261 EXPECT_FALSE(ref_count2->IsAliveWithNoKnownRefs());
4262
4263 EXPECT_FALSE(ref_count2->ReleaseFromUnprotectedPtr());
4264 } else {
4265 // If the allocation was moved to another slot, the old ref-count stayed
4266 // in the same location in memory, is no longer alive, but still has a
4267 // reference. The new ref-count is alive, but has no references.
4268 EXPECT_NE(ref_count1, ref_count2);
4269 EXPECT_FALSE(ref_count1->IsAlive());
4270 EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
4271 EXPECT_TRUE(ref_count2->IsAliveWithNoKnownRefs());
4272
4273 EXPECT_TRUE(ref_count1->ReleaseFromUnprotectedPtr());
4274 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
4275 }
4276
4277 allocator.root()->Free(ptr2);
4278 }
4279
4280 TEST_P(PartitionAllocTest, RefCountRealloc) {
4281 if (!UseBRPPool()) {
4282 return;
4283 }
4284
4285 size_t alloc_sizes[] = {500, 5000, 50000, 400000};
4286
4287 for (size_t alloc_size : alloc_sizes) {
4288 alloc_size -= ExtraAllocSize(allocator);
4289 RunRefCountReallocSubtest(alloc_size, alloc_size - 9);
4290 RunRefCountReallocSubtest(alloc_size, alloc_size + 9);
4291 RunRefCountReallocSubtest(alloc_size, alloc_size * 2);
4292 RunRefCountReallocSubtest(alloc_size, alloc_size / 2);
4293 }
4294 }
4295
4296 int g_unretained_dangling_raw_ptr_detected_count = 0;
4297
4298 class UnretainedDanglingRawPtrTest : public PartitionAllocTest {
4299 public:
4300   void SetUp() override {
4301 PartitionAllocTest::SetUp();
4302 g_unretained_dangling_raw_ptr_detected_count = 0;
4303 old_detected_fn_ = partition_alloc::GetUnretainedDanglingRawPtrDetectedFn();
4304
4305 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
4306 &UnretainedDanglingRawPtrTest::DanglingRawPtrDetected);
4307 old_unretained_dangling_ptr_enabled_ =
4308 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(true);
4309 }
4310   void TearDown() override {
4311 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(old_detected_fn_);
4312 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(
4313 old_unretained_dangling_ptr_enabled_);
4314 PartitionAllocTest::TearDown();
4315 }
4316
4317 private:
4318   static void DanglingRawPtrDetected(uintptr_t) {
4319 g_unretained_dangling_raw_ptr_detected_count++;
4320 }
4321
4322 partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
4323 bool old_unretained_dangling_ptr_enabled_;
4324 };
4325
4326 INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
4327 UnretainedDanglingRawPtrTest,
4328 testing::ValuesIn(GetPartitionAllocTestParams()));
4329
4330 TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrNoReport) {
4331 if (!UseBRPPool()) {
4332 return;
4333 }
4334
4335 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4336 EXPECT_TRUE(ptr);
4337 auto* ref_count =
4338 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4339 ref_count->Acquire();
4340 EXPECT_TRUE(ref_count->IsAlive());
4341 // Allocation is still live, so calling ReportIfDangling() should not result
4342 // in any detections.
4343 ref_count->ReportIfDangling();
4344 EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 0);
4345 EXPECT_FALSE(ref_count->Release());
4346 allocator.root()->Free(ptr);
4347 }
4348
4349 TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrShouldReport) {
4350 if (!UseBRPPool()) {
4351 return;
4352 }
4353
4354 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4355 EXPECT_TRUE(ptr);
4356 auto* ref_count =
4357 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4358 ref_count->AcquireFromUnprotectedPtr();
4359 EXPECT_TRUE(ref_count->IsAlive());
4360 allocator.root()->Free(ptr);
4361 // At this point, memory shouldn't be alive...
4362 EXPECT_FALSE(ref_count->IsAlive());
4363 // ...and we should report the ptr as dangling.
4364 ref_count->ReportIfDangling();
4365 EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 1);
4366 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4367
4368 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4369 }
4370
4371 #if !BUILDFLAG(HAS_64_BIT_POINTERS)
4372 TEST_P(PartitionAllocTest, BackupRefPtrGuardRegion) {
4373 if (!UseBRPPool()) {
4374 return;
4375 }
4376
4377 size_t alignment = internal::PageAllocationGranularity();
4378
4379 uintptr_t requested_address;
4380 memset(&requested_address, internal::kQuarantinedByte,
4381 sizeof(requested_address));
4382 requested_address = RoundDownToPageAllocationGranularity(requested_address);
4383
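  // requested_address is now a page-aligned address built from the quarantine
  // byte pattern; such an address is presumed to fall in the BRP guard region,
  // so the request below should not be satisfied at that exact address.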
4384 uintptr_t allocated_address =
4385 AllocPages(requested_address, alignment, alignment,
4386 PageAccessibilityConfiguration(
4387 PageAccessibilityConfiguration::kReadWrite),
4388 PageTag::kPartitionAlloc);
4389 EXPECT_NE(allocated_address, requested_address);
4390
4391 if (allocated_address) {
4392 FreePages(allocated_address, alignment);
4393 }
4394 }
4395 #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
4396 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
4397
4398 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
4399
4400 // Allocate memory, and reference it from 3 raw_ptr. Among them 2 will be
4401 // dangling.
4402 TEST_P(PartitionAllocTest, DanglingPtr) {
4403 if (!UseBRPPool()) {
4404 return;
4405 }
4406
4407 CountDanglingRawPtr dangling_checks;
4408
4409 // Allocate memory, and reference it from 3 raw_ptr.
4410 uint64_t* ptr = static_cast<uint64_t*>(
4411 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4412 auto* ref_count =
4413 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4414 ref_count->Acquire();
4415 ref_count->Acquire();
4416 ref_count->Acquire();
4417 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4418 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4419
4420 // The first raw_ptr stops referencing it, before the memory has been
4421 // released.
4422 EXPECT_FALSE(ref_count->Release());
4423 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4424 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4425
4426 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
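  // In the perf-experiment configuration, dangling-pointer detection is
  // compiled out, so no detections or releases are expected below.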
4427   // Free it. This creates two dangling pointers.
4428 allocator.root()->Free(ptr);
4429 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4430 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4431
4432   // The dangling raw_ptr stops referencing it.
4433 EXPECT_FALSE(ref_count->Release());
4434 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4435 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4436
4437   // The dangling raw_ptr stops referencing it again.
4438 EXPECT_TRUE(ref_count->Release());
4439 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4440 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4441 #else
4442   // Free it. This creates two dangling pointers.
4443 allocator.root()->Free(ptr);
4444 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4445 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4446
4447   // The dangling raw_ptr stops referencing it.
4448 EXPECT_FALSE(ref_count->Release());
4449 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4450 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4451
4452   // The dangling raw_ptr stops referencing it again.
4453 EXPECT_TRUE(ref_count->Release());
4454 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4455 EXPECT_EQ(g_dangling_raw_ptr_released_count, 2);
4456 #endif
4457
4458 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4459 }
4460
4461 // Allocate memory, and reference it from 3
4462 // raw_ptr<T, DisableDanglingPtrDetection>. Among them 2 will be dangling. This
4463 // doesn't trigger any dangling raw_ptr checks.
4464 TEST_P(PartitionAllocTest, DanglingDanglingPtr) {
4465 if (!UseBRPPool()) {
4466 return;
4467 }
4468
4469 CountDanglingRawPtr dangling_checks;
4470
4471 // Allocate memory, and reference it from 3 raw_ptr.
4472 uint64_t* ptr = static_cast<uint64_t*>(
4473 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4474 auto* ref_count =
4475 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4476 ref_count->AcquireFromUnprotectedPtr();
4477 ref_count->AcquireFromUnprotectedPtr();
4478 ref_count->AcquireFromUnprotectedPtr();
4479 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4480 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4481
4482 // The first raw_ptr<T, DisableDanglingPtrDetection> stops referencing it,
4483 // before the memory has been released.
4484 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4485 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4486 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4487
4488 // Free it. This creates two dangling raw_ptr<T, DisableDanglingPtrDetection>.
4489 allocator.root()->Free(ptr);
4490 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4491 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4492
4493   // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4494 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4495 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4496 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4497
4498   // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it
4499 // again.
4500 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4501 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4502 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4503
4504 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4505 }
4506
4507 // When 'free' is called, one raw_ptr<> and one
4508 // raw_ptr<T, DisableDanglingPtrDetection> remain. The raw_ptr<> is released first.
4509 TEST_P(PartitionAllocTest, DanglingMixedReleaseRawPtrFirst) {
4510 if (!UseBRPPool()) {
4511 return;
4512 }
4513
4514 CountDanglingRawPtr dangling_checks;
4515
4516 uint64_t* ptr = static_cast<uint64_t*>(
4517 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4518 auto* ref_count =
4519 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4520 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4521 ref_count->AcquireFromUnprotectedPtr();
4522 ref_count->Acquire();
4523 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4524 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4525
4526 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
4527 // Free it.
4528 allocator.root()->Free(ptr);
4529 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4530 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4531
4532 // The raw_ptr<> stops referencing it.
4533 EXPECT_FALSE(ref_count->Release());
4534 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4535 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4536
4537   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4538 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4539 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4540 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4541 #else
4542 // Free it.
4543 allocator.root()->Free(ptr);
4544 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4545 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4546
4547 // The raw_ptr<> stops referencing it.
4548 EXPECT_FALSE(ref_count->Release());
4549 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4550 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4551
4552   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4553 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4554 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4555 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4556 #endif
4557
4558 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4559 }
4560
4561 // When 'free' is called, one raw_ptr<> and one
4562 // raw_ptr<T, DisableDanglingPtrDetection> remain.
4563 // The raw_ptr<T, DisableDanglingPtrDetection> is released first. This
4564 // triggers the dangling raw_ptr<> checks.
4565 TEST_P(PartitionAllocTest, DanglingMixedReleaseDanglingPtrFirst) {
4566 if (!UseBRPPool()) {
4567 return;
4568 }
4569
4570 CountDanglingRawPtr dangling_checks;
4571
4572 void* ptr =
4573 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4574 auto* ref_count =
4575 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4576 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4577 ref_count->AcquireFromUnprotectedPtr();
4578 ref_count->Acquire();
4579 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4580 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4581
4582 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
4583 // Free it.
4584 allocator.root()->Free(ptr);
4585 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4586 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4587
4588   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4589 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4590 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4591 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4592
4593   // The raw_ptr<> stops referencing it.
4594 EXPECT_TRUE(ref_count->Release());
4595 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4596 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4597 #else
4598 // Free it.
4599 allocator.root()->Free(ptr);
4600 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4601 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4602
4603   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4604 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4605 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4606 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4607
4608   // The raw_ptr<> stops referencing it.
4609 EXPECT_TRUE(ref_count->Release());
4610 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4611 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4612 #endif
4613
4614 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4615 }
4616
4617 // When 'free' is called, one raw_ptr<T, DisableDanglingPtrDetection>
4618 // remains; it is then used to acquire one dangling raw_ptr<>. Release the
4619 // raw_ptr<> first.
4620 TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtr) {
4621 if (!UseBRPPool()) {
4622 return;
4623 }
4624
4625 CountDanglingRawPtr dangling_checks;
4626
4627 void* ptr =
4628 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4629 auto* ref_count =
4630 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4631 // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
4632 ref_count->AcquireFromUnprotectedPtr();
4633 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4634 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4635
4636 // Free it once.
4637 allocator.root()->Free(ptr);
4638 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4639 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4640
4641 // A raw_ptr<> starts referencing it.
4642 ref_count->Acquire();
4643 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4644 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4645
4646 // The raw_ptr<> stops referencing it.
4647 EXPECT_FALSE(ref_count->Release());
4648 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4649 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4650
4651 // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4652 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4653 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4654 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4655
4656 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4657 }
4658
4659 // Same as 'DanglingPtrUsedToAcquireNewRawPtr', but release the
4660 // raw_ptr<T, DisableDanglingPtrDetection> before the raw_ptr<>.
4661 TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtrVariant) {
4662 if (!UseBRPPool()) {
4663 return;
4664 }
4665
4666 CountDanglingRawPtr dangling_checks;
4667
4668 void* ptr =
4669 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4670 auto* ref_count =
4671 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4672 // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
4673 ref_count->AcquireFromUnprotectedPtr();
4674 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4675 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4676
4677 // Free it.
4678 allocator.root()->Free(ptr);
4679 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4680 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4681
4682 // A raw_ptr<> starts referencing it.
4683 ref_count->Acquire();
4684 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4685 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4686
4687   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4688 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4689 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4690 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4691
4692   // The raw_ptr<> stops referencing it.
4693 EXPECT_TRUE(ref_count->Release());
4694 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4695 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4696
4697 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4698 }
4699
4700 // Acquire a raw_ptr<T>, and release it before freeing memory. In the
4701 // background, there is one raw_ptr<T, DisableDanglingPtrDetection>. This
4702 // doesn't trigger any dangling raw_ptr<T> checks.
4703 TEST_P(PartitionAllocTest, RawPtrReleasedBeforeFree) {
4704 if (!UseBRPPool()) {
4705 return;
4706 }
4707
4708 CountDanglingRawPtr dangling_checks;
4709
4710 void* ptr =
4711 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4712 auto* ref_count =
4713 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4714 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4715 ref_count->Acquire();
4716 ref_count->AcquireFromUnprotectedPtr();
4717 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4718 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4719
4720 // Release the raw_ptr<>.
4721 EXPECT_FALSE(ref_count->Release());
4722 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4723 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4724
4725 // Free it.
4726 allocator.root()->Free(ptr);
4727 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4728 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4729
4730   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4731 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4732 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4733 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4734
4735 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4736 }
4737
4738 #if defined(PA_HAS_DEATH_TESTS)
4739 // DCHECK messages are stripped in official builds, which causes death tests
4740 // with matchers to fail.
4741 #if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4742
4743 // Acquire() once, Release() twice => CRASH
4744 TEST_P(PartitionAllocDeathTest, ReleaseUnderflowRawPtr) {
4745 if (!UseBRPPool()) {
4746 return;
4747 }
4748
4749 void* ptr =
4750 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4751 auto* ref_count =
4752 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4753 ref_count->Acquire();
4754 EXPECT_FALSE(ref_count->Release());
4755 EXPECT_DCHECK_DEATH(ref_count->Release());
4756 allocator.root()->Free(ptr);
4757 }
4758
4759 // AcquireFromUnprotectedPtr() once, ReleaseFromUnprotectedPtr() twice => CRASH
4760 TEST_P(PartitionAllocDeathTest, ReleaseUnderflowDanglingPtr) {
4761 if (!UseBRPPool()) {
4762 return;
4763 }
4764
4765 void* ptr =
4766 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4767 auto* ref_count =
4768 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4769 ref_count->AcquireFromUnprotectedPtr();
4770 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4771 EXPECT_DCHECK_DEATH(ref_count->ReleaseFromUnprotectedPtr());
4772 allocator.root()->Free(ptr);
4773 }
4774
4775 #endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4776 #endif // defined(PA_HAS_DEATH_TESTS)
4777 #endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
4778
4779 TEST_P(PartitionAllocTest, ReservationOffset) {
4780 // For normal buckets, offset should be kOffsetTagNormalBuckets.
4781 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4782 EXPECT_TRUE(ptr);
4783 uintptr_t address = UntagPtr(ptr);
4784 EXPECT_EQ(kOffsetTagNormalBuckets, *ReservationOffsetPointer(address));
4785 allocator.root()->Free(ptr);
4786
4787   // For direct map, each super page records its offset from the reservation start.
4788 size_t large_size = kSuperPageSize * 5 + PartitionPageSize() * .5f;
4789 ASSERT_GT(large_size, kMaxBucketed);
4790 ptr = allocator.root()->Alloc(large_size, type_name);
4791 EXPECT_TRUE(ptr);
4792 address = UntagPtr(ptr);
4793 EXPECT_EQ(0U, *ReservationOffsetPointer(address));
4794 EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
4795 EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
4796 EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
4797 EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
4798 EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
4799
4800 // In-place realloc doesn't affect the offsets.
4801 void* new_ptr = allocator.root()->Realloc(ptr, large_size * .8, type_name);
4802 EXPECT_EQ(new_ptr, ptr);
4803 EXPECT_EQ(0U, *ReservationOffsetPointer(address));
4804 EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
4805 EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
4806 EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
4807 EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
4808 EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
4809
4810 allocator.root()->Free(ptr);
4811 // After free, the offsets must be kOffsetTagNotAllocated.
4812 EXPECT_EQ(kOffsetTagNotAllocated, *ReservationOffsetPointer(address));
4813 EXPECT_EQ(kOffsetTagNotAllocated,
4814 *ReservationOffsetPointer(address + kSuperPageSize));
4815 EXPECT_EQ(kOffsetTagNotAllocated,
4816 *ReservationOffsetPointer(address + kSuperPageSize * 2));
4817 EXPECT_EQ(kOffsetTagNotAllocated,
4818 *ReservationOffsetPointer(address + kSuperPageSize * 3));
4819 EXPECT_EQ(kOffsetTagNotAllocated,
4820 *ReservationOffsetPointer(address + kSuperPageSize * 4));
4821 EXPECT_EQ(kOffsetTagNotAllocated,
4822 *ReservationOffsetPointer(address + kSuperPageSize * 5));
4823 }
4824
4825 TEST_P(PartitionAllocTest, GetReservationStart) {
4826 size_t large_size = kSuperPageSize * 3 + PartitionPageSize() * .5f;
4827 ASSERT_GT(large_size, kMaxBucketed);
4828 void* ptr = allocator.root()->Alloc(large_size, type_name);
4829 EXPECT_TRUE(ptr);
4830 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
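  // For a direct map, the first partition page of the reservation is assumed
  // to hold metadata and guard pages, hence the slot starts one partition page
  // into the reservation.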
4831 uintptr_t reservation_start = slot_start - PartitionPageSize();
4832 EXPECT_EQ(0U, reservation_start & DirectMapAllocationGranularityOffsetMask());
4833
4834 uintptr_t address = UntagPtr(ptr);
4835 for (uintptr_t a = address; a < address + large_size; ++a) {
4836 uintptr_t address2 = GetDirectMapReservationStart(a) + PartitionPageSize();
4837 EXPECT_EQ(slot_start, address2);
4838 }
4839
4840 EXPECT_EQ(reservation_start, GetDirectMapReservationStart(slot_start));
4841
4842 allocator.root()->Free(ptr);
4843 }
4844
4845 TEST_P(PartitionAllocTest, CheckReservationType) {
4846 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4847 EXPECT_TRUE(ptr);
4848 uintptr_t address = UntagPtr(ptr);
4849 uintptr_t address_to_check = address;
4850 EXPECT_FALSE(IsReservationStart(address_to_check));
4851 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4852 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4853 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4854 address_to_check = address + kTestAllocSize - 1;
4855 EXPECT_FALSE(IsReservationStart(address_to_check));
4856 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4857 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4858 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4859 address_to_check =
4860 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4861 EXPECT_TRUE(IsReservationStart(address_to_check));
4862 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4863 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4864 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4865 allocator.root()->Free(ptr);
4866 // Freeing keeps a normal-bucket super page in memory.
4867 address_to_check =
4868 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4869 EXPECT_TRUE(IsReservationStart(address_to_check));
4870 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4871 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4872 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4873
4874 size_t large_size = 2 * kSuperPageSize;
4875 ASSERT_GT(large_size, kMaxBucketed);
4876 ptr = allocator.root()->Alloc(large_size, type_name);
4877 EXPECT_TRUE(ptr);
4878 address = UntagPtr(ptr);
4879 address_to_check = address;
4880 EXPECT_FALSE(IsReservationStart(address_to_check));
4881 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4882 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4883 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4884 address_to_check =
4885 partition_alloc::internal::base::bits::AlignUp(address, kSuperPageSize);
4886 EXPECT_FALSE(IsReservationStart(address_to_check));
4887 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4888 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4889 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4890 address_to_check = address + large_size - 1;
4891 EXPECT_FALSE(IsReservationStart(address_to_check));
4892 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4893 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4894 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4895 address_to_check =
4896 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4897 EXPECT_TRUE(IsReservationStart(address_to_check));
4898 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4899 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4900 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4901 allocator.root()->Free(ptr);
4902 // Freeing releases direct-map super pages.
4903 address_to_check =
4904 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4905
4906 // DCHECKs don't work with EXPECT_DEATH on official builds.
4907 #if BUILDFLAG(PA_DCHECK_IS_ON) && (!defined(OFFICIAL_BUILD) || !defined(NDEBUG))
4908 // Expect to DCHECK on unallocated region.
4909 EXPECT_DEATH_IF_SUPPORTED(IsReservationStart(address_to_check), "");
4910 #endif // BUILDFLAG(PA_DCHECK_IS_ON) && (!defined(OFFICIAL_BUILD) ||
4911 // !defined(NDEBUG))
4912
4913 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4914 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4915 EXPECT_FALSE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4916 }
4917
4918 // Test for crash http://crbug.com/1169003.
4919 TEST_P(PartitionAllocTest, CrossPartitionRootRealloc) {
4920   // The size is large enough to be satisfied from a single-slot slot span.
4921 size_t test_size = MaxRegularSlotSpanSize() - ExtraAllocSize(allocator);
4922 void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(test_size);
4923 EXPECT_TRUE(ptr);
4924
4925 // Create new root and call PurgeMemory to simulate ConfigurePartitions().
4926 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
4927 PurgeFlags::kDiscardUnusedSystemPages);
4928 std::unique_ptr<PartitionRoot> new_root = CreateCustomTestRoot(
4929 GetCommonPartitionOptions(),
4930 PartitionTestOptions{.set_bucket_distribution = true});
4931
4932 // Realloc from |allocator.root()| into |new_root|.
4933 void* ptr2 = new_root->Realloc<AllocFlags::kReturnNull>(ptr, test_size + 1024,
4934 nullptr);
4935 EXPECT_TRUE(ptr2);
4936 PA_EXPECT_PTR_NE(ptr, ptr2);
4937 }
4938
4939 TEST_P(PartitionAllocTest, FastPathOrReturnNull) {
4940 size_t allocation_size = 64;
4941 // The very first allocation is never a fast path one, since it needs a new
4942 // super page and a new partition page.
4943 EXPECT_FALSE(allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
4944 allocation_size, ""));
4945 void* ptr = allocator.root()->Alloc(allocation_size);
4946 ASSERT_TRUE(ptr);
4947
4948 // Next one is, since the partition page has been activated.
4949 void* ptr2 = allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
4950 allocation_size, "");
4951 EXPECT_TRUE(ptr2);
4952
4953 // First allocation of a different bucket is slow.
4954 EXPECT_FALSE(allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
4955 2 * allocation_size, ""));
4956
4957 size_t allocated_size = 2 * allocation_size;
4958 std::vector<void*> ptrs;
4959 while (void* new_ptr =
4960 allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
4961 allocation_size, "")) {
4962 ptrs.push_back(new_ptr);
4963 allocated_size += allocation_size;
4964 }
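  // The fast path only succeeds while the already-provisioned slot span still
  // has free slots, so the total allocated this way should stay within a
  // single regular slot span (checked below).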
4965 EXPECT_LE(allocated_size,
4966 PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan);
4967
4968 for (void* ptr_to_free : ptrs) {
4969 allocator.root()->Free<FreeFlags::kNoHooks>(ptr_to_free);
4970 }
4971
4972 allocator.root()->Free<FreeFlags::kNoHooks>(ptr);
4973 allocator.root()->Free<FreeFlags::kNoHooks>(ptr2);
4974 }
4975
4976 #if defined(PA_HAS_DEATH_TESTS)
4977 // DCHECK messages are stripped in official builds, which causes death tests
4978 // with matchers to fail.
4979 #if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4980
4981 TEST_P(PartitionAllocDeathTest, CheckTriggered) {
4982 EXPECT_DCHECK_DEATH_WITH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
4983 EXPECT_DEATH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
4984 }
4985
4986 #endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4987 #endif // defined(PA_HAS_DEATH_TESTS)
4988
4989 // Not on chromecast, since gtest considers extra output from itself as a test
4990 // failure:
4991 // https://ci.chromium.org/ui/p/chromium/builders/ci/Cast%20Audio%20Linux/98492/overview
4992 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_HAS_DEATH_TESTS) && \
4993 !BUILDFLAG(PA_IS_CASTOS)
4994
4995 namespace {
4996
4997 PA_NOINLINE void FreeForTest(void* data) {
4998 free(data);
4999 }
5000
5001 class ThreadDelegateForPreforkHandler
5002 : public base::PlatformThreadForTesting::Delegate {
5003 public:
5004   ThreadDelegateForPreforkHandler(std::atomic<bool>& please_stop,
5005 std::atomic<int>& started_threads,
5006 const int alloc_size)
5007 : please_stop_(please_stop),
5008 started_threads_(started_threads),
5009 alloc_size_(alloc_size) {}
5010
5011   void ThreadMain() override {
5012 started_threads_++;
5013 while (!please_stop_.load(std::memory_order_relaxed)) {
5014 void* ptr = malloc(alloc_size_);
5015
5016       // A simple malloc() / free() pair can be discarded by the compiler (and
5017       // is), making the test fail. Making |FreeForTest()| a PA_NOINLINE
5018       // function is both necessary and sufficient to keep the call from being
5019       // eliminated.
5020 FreeForTest(ptr);
5021 }
5022 }
5023
5024 private:
5025 std::atomic<bool>& please_stop_;
5026 std::atomic<int>& started_threads_;
5027 const int alloc_size_;
5028 };
5029
5030 } // namespace
5031
5032 // Disabled because executing it causes Gtest to show a warning in the output,
5033 // which confuses the runner on some platforms, making the test report an
5034 // "UNKNOWN" status even though it succeeded.
5035 TEST_P(PartitionAllocTest, DISABLED_PreforkHandler) {
5036 std::atomic<bool> please_stop;
5037 std::atomic<int> started_threads{0};
5038
5039   // Continuously allocates / frees memory, bypassing the thread cache. This
5040   // makes it likely that a thread owns the allocator lock when EXPECT_EXIT()
5041   // forks, which would deadlock the child without the prefork handler.
5042 constexpr size_t kAllocSize = ThreadCache::kLargeSizeThreshold + 1;
5043 ThreadDelegateForPreforkHandler delegate(please_stop, started_threads,
5044 kAllocSize);
5045
5046 constexpr int kThreads = 4;
5047 base::PlatformThreadHandle thread_handles[kThreads];
5048 for (auto& thread_handle : thread_handles) {
5049 base::PlatformThreadForTesting::Create(0, &delegate, &thread_handle);
5050 }
5051 // Make sure all threads are actually already running.
5052 while (started_threads != kThreads) {
5053 }
5054
5055 EXPECT_EXIT(
5056 {
5057 void* ptr = malloc(kAllocSize);
5058 FreeForTest(ptr);
5059 exit(1);
5060 },
5061 ::testing::ExitedWithCode(1), "");
5062
5063 please_stop.store(true);
5064 for (auto& thread_handle : thread_handles) {
5065 base::PlatformThreadForTesting::Join(thread_handle);
5066 }
5067 }
5068
5069 #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
5070         // defined(PA_HAS_DEATH_TESTS) && !BUILDFLAG(PA_IS_CASTOS)
5071
5072 // Checks the bucket index logic.
5073 TEST_P(PartitionAllocTest, GetIndex) {
5074 BucketIndexLookup lookup{};
5075
5076 for (size_t size = 0; size < kMaxBucketed; size++) {
5077 size_t index = BucketIndexLookup::GetIndex(size);
5078 ASSERT_GE(lookup.bucket_sizes()[index], size);
5079 }
5080
5081   // Make sure that powers of two have exactly matching buckets.
5082 for (size_t size = (1 << (kMinBucketedOrder - 1)); size < kMaxBucketed;
5083 size <<= 1) {
5084 size_t index = BucketIndexLookup::GetIndex(size);
5085 ASSERT_EQ(lookup.bucket_sizes()[index], size);
5086 }
5087 }
5088
5089 // Used to check alignment. If the compiler understands the annotations, the
5090 // zeroing in the constructor uses aligned SIMD instructions.
5091 TEST_P(PartitionAllocTest, MallocFunctionAnnotations) {
5092 struct TestStruct {
5093 uint64_t a = 0;
5094 uint64_t b = 0;
5095 };
5096
5097 void* buffer = Alloc(sizeof(TestStruct));
5098 // Should use "mov*a*ps" on x86_64.
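  // (An aligned SSE store, as opposed to the unaligned "mov*u*ps", assuming
  // the compiler honored the allocation-alignment annotations.)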
5099 auto* x = new (buffer) TestStruct();
5100
5101 EXPECT_EQ(x->a, 0u);
5102 Free(buffer);
5103 }
5104
5105 // Test that the ConfigurablePool works properly.
5106 TEST_P(PartitionAllocTest, ConfigurablePool) {
5107 EXPECT_FALSE(IsConfigurablePoolAvailable());
5108
5109 // The rest is only applicable to 64-bit mode
5110 #if defined(ARCH_CPU_64_BITS)
5111 // Repeat the test for every possible Pool size
5112 const size_t max_pool_size = PartitionAddressSpace::ConfigurablePoolMaxSize();
5113 const size_t min_pool_size = PartitionAddressSpace::ConfigurablePoolMinSize();
5114 for (size_t pool_size = max_pool_size; pool_size >= min_pool_size;
5115 pool_size /= 2) {
5116 PA_DCHECK(std::has_single_bit(pool_size));
5117 EXPECT_FALSE(IsConfigurablePoolAvailable());
5118 uintptr_t pool_base =
5119 AllocPages(pool_size, pool_size,
5120 PageAccessibilityConfiguration(
5121 PageAccessibilityConfiguration::kInaccessible),
5122 PageTag::kPartitionAlloc);
5123 EXPECT_NE(0u, pool_base);
5124 PartitionAddressSpace::InitConfigurablePool(pool_base, pool_size);
5125
5126 EXPECT_TRUE(IsConfigurablePoolAvailable());
5127
5128 PartitionOptions opts = GetCommonPartitionOptions();
5129 opts.use_configurable_pool = PartitionOptions::kAllowed;
5130 std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
5131 opts, PartitionTestOptions{.uncap_empty_slot_span_memory = true,
5132 .set_bucket_distribution = true});
5133
5134 const size_t count = 250;
5135 std::vector<void*> allocations(count, nullptr);
5136 for (size_t i = 0; i < count; ++i) {
5137 const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
5138 allocations[i] = root->Alloc(size);
5139 EXPECT_NE(nullptr, allocations[i]);
5140 // We don't Untag allocations here because MTE is disabled for
5141 // configurable pools used by V8.
5142 // https://bugs.chromium.org/p/v8/issues/detail?id=13117
5143 uintptr_t allocation_base = reinterpret_cast<uintptr_t>(allocations[i]);
5144 EXPECT_EQ(allocation_base, UntagPtr(allocations[i]));
5145 EXPECT_TRUE(allocation_base >= pool_base &&
5146 allocation_base < pool_base + pool_size);
5147 }
5148
5149 PartitionAddressSpace::UninitConfigurablePoolForTesting();
5150 FreePages(pool_base, pool_size);
5151 }
5152
5153 #endif // defined(ARCH_CPU_64_BITS)
5154 }
5155
5156 TEST_P(PartitionAllocTest, EmptySlotSpanSizeIsCapped) {
5157 // Use another root, since the ones from the test harness disable the empty
5158 // slot span size cap.
5159 std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
5160 GetCommonPartitionOptions(),
5161 PartitionTestOptions{.set_bucket_distribution = true});
5162
5163 // Allocate some memory, don't free it to keep committed memory.
5164 std::vector<void*> allocated_memory;
5165 const size_t size = SystemPageSize();
5166 const size_t count = 400;
5167 for (size_t i = 0; i < count; i++) {
5168 void* ptr = root->Alloc(size);
5169 allocated_memory.push_back(ptr);
5170 }
5171 ASSERT_GE(root->total_size_of_committed_pages.load(std::memory_order_relaxed),
5172 size * count);
5173
5174   // To create empty slot spans, allocate from single-slot slot spans, 128 KiB
5175   // at a time.
5176 std::vector<void*> single_slot_allocated_memory;
5177 constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize - 1;
5178 const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
5179 // Make sure that even with allocation size rounding up, a single allocation
5180 // is still below the threshold.
5181 ASSERT_LT(MaxRegularSlotSpanSize() * 2,
5182 ((count * size) >> root->max_empty_slot_spans_dirty_bytes_shift));
5183 for (size_t i = 0; i < single_slot_count; i++) {
5184 void* ptr = root->Alloc(single_slot_size);
5185 single_slot_allocated_memory.push_back(ptr);
5186 }
5187
5188 // Free everything at once, creating as many empty slot spans as there are
5189 // allocations (since they are from single-slot slot spans).
5190 for (void* ptr : single_slot_allocated_memory) {
5191 root->Free(ptr);
5192 }
5193
5194 // Still have some committed empty slot spans.
5195 // PA_TS_UNCHECKED_READ() is not an issue here, since everything is
5196 // single-threaded.
5197 EXPECT_GT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
5198 // But not all, as the cap triggered.
5199 EXPECT_LT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5200 single_slot_count * single_slot_size);
5201
5202 // Nothing left after explicit purge.
5203 root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
5204 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
5205
5206 for (void* ptr : allocated_memory) {
5207 root->Free(ptr);
5208 }
5209 }
5210
5211 TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {
5212 std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
5213 GetCommonPartitionOptions(),
5214 PartitionTestOptions{.uncap_empty_slot_span_memory = true,
5215 .set_bucket_distribution = true});
5216
5217 std::vector<void*> single_slot_allocated_memory;
5218 constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize + 10;
5219 const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
5220 const size_t bucket_size =
5221 root->buckets[SizeToIndex(single_slot_size)].slot_size;
5222
5223 for (size_t i = 0; i < single_slot_count; i++) {
5224 void* ptr = root->Alloc(single_slot_size);
5225 single_slot_allocated_memory.push_back(ptr);
5226 }
5227
5228 // Free everything at once, creating as many empty slot spans as there are
5229 // allocations (since they are from single-slot slot spans).
5230 for (void* ptr : single_slot_allocated_memory) {
5231 root->Free(ptr);
5232 }
5233 single_slot_allocated_memory.clear();
5234
5235 // Some of the free()-s above overflowed the slot span ring.
5236 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5237 kDefaultEmptySlotSpanRingSize * bucket_size);
5238
5239 // Now can cache more slot spans.
5240 root->EnableLargeEmptySlotSpanRing();
5241
5242 constexpr size_t single_slot_large_count = kDefaultEmptySlotSpanRingSize + 10;
5243 for (size_t i = 0; i < single_slot_large_count; i++) {
5244 void* ptr = root->Alloc(single_slot_size);
5245 single_slot_allocated_memory.push_back(ptr);
5246 }
5247
5248 for (void* ptr : single_slot_allocated_memory) {
5249 root->Free(ptr);
5250 }
5251 single_slot_allocated_memory.clear();
5252
5253 // No overflow this time.
5254 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5255 single_slot_large_count * bucket_size);
5256
5257 constexpr size_t single_slot_too_many_count = kMaxFreeableSpans + 10;
5258 for (size_t i = 0; i < single_slot_too_many_count; i++) {
5259 void* ptr = root->Alloc(single_slot_size);
5260 single_slot_allocated_memory.push_back(ptr);
5261 }
5262
5263 for (void* ptr : single_slot_allocated_memory) {
5264 root->Free(ptr);
5265 }
5266 single_slot_allocated_memory.clear();
5267
5268 // Overflow still works.
5269 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5270 kMaxFreeableSpans * bucket_size);
5271 }
5272
5273 #if BUILDFLAG(PA_IS_CAST_ANDROID) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
5274 extern "C" {
5275 void* __real_malloc(size_t);
5276 } // extern "C"
5277
5278 TEST_P(PartitionAllocTest, HandleMixedAllocations) {
5279 void* ptr = __real_malloc(12);
5280 // Should not crash, no test assertion.
5281 free(ptr);
5282 }
5283 #endif
5284
5285 TEST_P(PartitionAllocTest, SortFreelist) {
5286 const size_t count = 100;
5287 const size_t allocation_size = 1;
5288 void* first_ptr = allocator.root()->Alloc(allocation_size);
5289
5290 std::vector<void*> allocations;
5291 for (size_t i = 0; i < count; ++i) {
5292 allocations.push_back(allocator.root()->Alloc(allocation_size));
5293 }
5294
5295 // Shuffle and free memory out of order.
5296 std::random_device rd;
5297 std::mt19937 generator(rd());
5298 std::shuffle(allocations.begin(), allocations.end(), generator);
5299
5300 // Keep one allocation alive (first_ptr), so that the SlotSpan is not fully
5301 // empty.
5302 for (void* ptr : allocations) {
5303 allocator.root()->Free(ptr);
5304 }
5305 allocations.clear();
5306
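  // Discarding unused system pages also sorts the freelists of the surviving
  // slot spans, which the expectations below rely on.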
5307 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
5308
5309 size_t bucket_index =
5310 SizeToIndex(allocation_size + ExtraAllocSize(allocator));
5311 auto& bucket = allocator.root()->buckets[bucket_index];
5312 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5313
5314 // Can sort again.
5315 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
5316 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5317
5318 for (size_t i = 0; i < count; ++i) {
5319 allocations.push_back(allocator.root()->Alloc(allocation_size));
5320 // Allocating keeps the freelist sorted.
5321 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5322 }
5323
5324 // Check that it is sorted.
5325 for (size_t i = 1; i < allocations.size(); i++) {
5326 EXPECT_LT(UntagPtr(allocations[i - 1]), UntagPtr(allocations[i]));
5327 }
5328
5329 for (void* ptr : allocations) {
5330 allocator.root()->Free(ptr);
5331 // Free()-ing memory destroys order. Not looking at the head of the active
5332     // list, as it is not necessarily the one |ptr| came from.
5333 auto* slot_span =
5334 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
5335 EXPECT_FALSE(slot_span->freelist_is_sorted());
5336 }
5337
5338 allocator.root()->Free(first_ptr);
5339 }
5340
5341 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_LINUX) && \
5342 defined(ARCH_CPU_64_BITS)
5343 TEST_P(PartitionAllocTest, CrashOnUnknownPointer) {
5344 int not_a_heap_object = 42;
5345   EXPECT_DEATH(allocator.root()->Free(&not_a_heap_object), "");
5346 }
5347 #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
5348 // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_64_BITS)
5349
5350 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
5351
5352 // Adapted from crashpad tests.
5353 class ScopedOpenCLNoOpKernel {
5354 public:
5355   ScopedOpenCLNoOpKernel()
5356 : context_(nullptr),
5357 program_(nullptr),
5358 kernel_(nullptr),
5359 success_(false) {}
5360
  ScopedOpenCLNoOpKernel(const ScopedOpenCLNoOpKernel&) = delete;
  ScopedOpenCLNoOpKernel& operator=(const ScopedOpenCLNoOpKernel&) = delete;

  ~ScopedOpenCLNoOpKernel() {
    if (kernel_) {
      cl_int rv = clReleaseKernel(kernel_);
      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseKernel";
    }

    if (program_) {
      cl_int rv = clReleaseProgram(program_);
      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseProgram";
    }

    if (context_) {
      cl_int rv = clReleaseContext(context_);
      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseContext";
    }
  }

  void SetUp() {
    cl_platform_id platform_id;
    cl_int rv = clGetPlatformIDs(1, &platform_id, nullptr);
    ASSERT_EQ(rv, CL_SUCCESS) << "clGetPlatformIDs";
    cl_device_id device_id;
    rv =
        clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, nullptr);
#if defined(ARCH_CPU_ARM64)
    // CL_DEVICE_TYPE_CPU doesn’t seem to work at all on arm64, meaning that
    // these weird OpenCL modules probably don’t show up there at all. Keep
    // this test even on arm64 in case this ever does start working.
    if (rv == CL_INVALID_VALUE) {
      return;
    }
#endif  // ARCH_CPU_ARM64
    ASSERT_EQ(rv, CL_SUCCESS) << "clGetDeviceIDs";

    context_ = clCreateContext(nullptr, 1, &device_id, nullptr, nullptr, &rv);
    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateContext";

    const char* sources[] = {
        "__kernel void NoOp(void) {barrier(CLK_LOCAL_MEM_FENCE);}",
    };
    const size_t source_lengths[] = {
        strlen(sources[0]),
    };
    static_assert(std::size(sources) == std::size(source_lengths),
                  "arrays must be parallel");

    program_ = clCreateProgramWithSource(context_, std::size(sources), sources,
                                         source_lengths, &rv);
    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateProgramWithSource";

    rv = clBuildProgram(program_, 1, &device_id, "-cl-opt-disable", nullptr,
                        nullptr);
    ASSERT_EQ(rv, CL_SUCCESS) << "clBuildProgram";

    kernel_ = clCreateKernel(program_, "NoOp", &rv);
    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateKernel";

    success_ = true;
  }

  bool success() const { return success_; }

 private:
  cl_context context_;
  cl_program program_;
  cl_kernel kernel_;
  bool success_;
};

// On macOS 10.11, allocations are made with PartitionAlloc, but the pointer
// is incorrectly passed by CoreFoundation to the previous default zone,
// causing crashes. This test is intended to detect regressions of this kind in
// future versions of macOS.
TEST_P(PartitionAllocTest, OpenCL) {
  ScopedOpenCLNoOpKernel kernel;
  kernel.SetUp();
#if !defined(ARCH_CPU_ARM64)
  ASSERT_TRUE(kernel.success());
#endif
}

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // BUILDFLAG(IS_MAC)

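// For every bucket, the "small" slot span sizing should waste less than 5% of
// a system page per slot span, and buckets at or below the regular slot span
// size should not need more pages than the regular per-slot-span limit.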
TEST_P(PartitionAllocTest, SmallSlotSpanWaste) {
  for (PartitionRoot::Bucket& bucket : allocator.root()->buckets) {
    const size_t slot_size = bucket.slot_size;
    if (slot_size == kInvalidBucketSize) {
      continue;
    }

    size_t small_system_page_count =
        partition_alloc::internal::ComputeSystemPagesPerSlotSpan(
            bucket.slot_size, true);
    size_t small_waste =
        (small_system_page_count * SystemPageSize()) % slot_size;

    EXPECT_LT(small_waste, .05 * SystemPageSize());
    if (slot_size <= MaxRegularSlotSpanSize()) {
      EXPECT_LE(small_system_page_count, MaxSystemPagesPerRegularSlotSpan());
    }
  }
}

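// Exercises PartitionBucket::SortActiveSlotSpans() on synthetic slot spans:
// at most kMaxSlotSpansToSort spans at the head of the list are sorted, no
// span is lost or duplicated, and spans without freelist entries end up at
// the end of the sorted run.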
TEST_P(PartitionAllocTest, SortActiveSlotSpans) {
  auto run_test = [](size_t count) {
    PartitionBucket bucket;
    bucket.Init(16);
    bucket.active_slot_spans_head = nullptr;

    std::vector<SlotSpanMetadata> slot_spans;
    slot_spans.reserve(count);

    // Add slot spans with random freelist length.
    for (size_t i = 0; i < count; i++) {
      slot_spans.emplace_back(&bucket);
      auto& slot_span = slot_spans.back();
      slot_span.num_unprovisioned_slots =
          partition_alloc::internal::base::RandGenerator(
              bucket.get_slots_per_span() / 2);
      slot_span.num_allocated_slots =
          partition_alloc::internal::base::RandGenerator(
              bucket.get_slots_per_span() - slot_span.num_unprovisioned_slots);
      slot_span.next_slot_span = bucket.active_slot_spans_head;
      bucket.active_slot_spans_head = &slot_span;
    }

    bucket.SortActiveSlotSpans();

    std::set<SlotSpanMetadata*> seen_slot_spans;
    std::vector<SlotSpanMetadata*> sorted_slot_spans;
    for (auto* slot_span = bucket.active_slot_spans_head; slot_span;
         slot_span = slot_span->next_slot_span) {
      sorted_slot_spans.push_back(slot_span);
      seen_slot_spans.insert(slot_span);
    }

    // None repeated, none missing.
    EXPECT_EQ(seen_slot_spans.size(), sorted_slot_spans.size());
    EXPECT_EQ(seen_slot_spans.size(), slot_spans.size());

    // The first slot spans are sorted.
    size_t sorted_spans_count =
        std::min(PartitionBucket::kMaxSlotSpansToSort, count);
    EXPECT_TRUE(std::is_sorted(sorted_slot_spans.begin(),
                               sorted_slot_spans.begin() + sorted_spans_count,
                               partition_alloc::internal::CompareSlotSpans));

    // Slot spans with no freelist entries are at the end of the sorted run.
    auto has_empty_freelist = [](SlotSpanMetadata* a) {
      return a->GetFreelistLength() == 0;
    };
    auto it = std::find_if(sorted_slot_spans.begin(),
                           sorted_slot_spans.begin() + sorted_spans_count,
                           has_empty_freelist);
    if (it != sorted_slot_spans.end()) {
      EXPECT_TRUE(std::all_of(it,
                              sorted_slot_spans.begin() + sorted_spans_count,
                              has_empty_freelist));
    }
  };

  // Everything is sorted.
  run_test(PartitionBucket::kMaxSlotSpansToSort / 2);
  // Only the first slot spans are sorted.
  run_test(PartitionBucket::kMaxSlotSpansToSort * 2);

  // Corner cases.
  run_test(0);
  run_test(1);
}

#if BUILDFLAG(USE_FREESLOT_BITMAP)
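// The tests below check that the free-slot bitmap mirrors each slot's state:
// marked used on allocation, marked free on Free(), and rewritten when the
// slot span is decommitted or purged.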
TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsUsedAfterAlloc) {
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr);
}

TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsFreeAfterFree) {
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr);
  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
}

TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterDecommit) {
  void* ptr1 = allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr1);
  allocator.root()->Free(ptr1);

  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
  // Decommit the slot span. Bitmap will be rewritten in Decommit().
  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
}

TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterPurge) {
  void* ptr1 = allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name);
  char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name));
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr2);
  allocator.root()->Free(ptr2);

  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
  // Bitmap will be rewritten in PartitionPurgeSlotSpan().
  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr1);
}

#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)

}  // namespace partition_alloc::internal

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)