1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
6
7 #include <algorithm>
8 #include <cstddef>
9 #include <cstdint>
10 #include <cstdlib>
11 #include <cstring>
12 #include <iostream>
13 #include <limits>
14 #include <memory>
15 #include <random>
16 #include <set>
17 #include <tuple>
18 #include <vector>
19
20 #include "base/allocator/partition_allocator/address_space_randomization.h"
21 #include "base/allocator/partition_allocator/chromecast_buildflags.h"
22 #include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
23 #include "base/allocator/partition_allocator/freeslot_bitmap.h"
24 #include "base/allocator/partition_allocator/page_allocator_constants.h"
25 #include "base/allocator/partition_allocator/partition_address_space.h"
26 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
27 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
28 #include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
29 #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
30 #include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
31 #include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
32 #include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
33 #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
34 #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
35 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
36 #include "base/allocator/partition_allocator/partition_alloc_config.h"
37 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
38 #include "base/allocator/partition_allocator/partition_bucket.h"
39 #include "base/allocator/partition_allocator/partition_cookie.h"
40 #include "base/allocator/partition_allocator/partition_freelist_entry.h"
41 #include "base/allocator/partition_allocator/partition_page.h"
42 #include "base/allocator/partition_allocator/partition_ref_count.h"
43 #include "base/allocator/partition_allocator/partition_root.h"
44 #include "base/allocator/partition_allocator/pkey.h"
45 #include "base/allocator/partition_allocator/reservation_offset_table.h"
46 #include "base/allocator/partition_allocator/tagging.h"
47 #include "base/system/sys_info.h"
48 #include "base/test/gtest_util.h"
49 #include "build/build_config.h"
50 #include "testing/gtest/include/gtest/gtest.h"
51
52 #if defined(__ARM_FEATURE_MEMORY_TAGGING)
53 #include <arm_acle.h>
54 #endif
55
56 #if BUILDFLAG(IS_POSIX)
57 #if BUILDFLAG(IS_LINUX)
58 // We need PKEY_DISABLE_WRITE in this file; glibc defines it in sys/mman.h but
59 // it's actually Linux-specific and other Linux libcs define it in linux/mman.h.
60 // We have to include both to be sure we get the definition.
61 #include <linux/mman.h>
62 #endif // BUILDFLAG(IS_LINUX)
63 #include <sys/mman.h>
64 #include <sys/resource.h>
65 #include <sys/time.h>
66 #endif // BUILDFLAG(IS_POSIX)
67
68 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
69 #include <OpenCL/opencl.h>
70 #endif
71
72 #if BUILDFLAG(ENABLE_PKEYS)
73 #include <sys/syscall.h>
74 #endif
75
76 // In the MTE world, the upper bits of a pointer can be decorated with a tag,
77 // thus allowing many versions of the same pointer to exist. These macros take
78 // that into account when comparing.
79 #define PA_EXPECT_PTR_EQ(ptr1, ptr2) \
80 { EXPECT_EQ(UntagPtr(ptr1), UntagPtr(ptr2)); }
81 #define PA_EXPECT_PTR_NE(ptr1, ptr2) \
82 { EXPECT_NE(UntagPtr(ptr1), UntagPtr(ptr2)); }
83
84 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
85
86 namespace {
87
88 bool IsLargeMemoryDevice() {
89 // Treat any device with 4GiB or more of physical memory as a "large memory
90 // device". We check for slightly less than 4GiB so that devices with a small
91 // amount of memory not accessible to the OS still count as "large".
92 //
93 // Set to 4GiB, since we have 2GiB Android devices where tests flakily fail
94 // (e.g. Nexus 5X, crbug.com/1191195).
95 return base::SysInfo::AmountOfPhysicalMemory() >= 4000ULL * 1024 * 1024;
96 }
97
98 bool SetAddressSpaceLimit() {
99 #if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
100 // 32 bits => address space is limited already.
101 return true;
102 #elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)
103 // macOS will accept, but not enforce, |RLIMIT_DATA| changes. See
104 // https://crbug.com/435269 and rdar://17576114.
105 //
106 // Note: This number must be at least 6 GB, because with
107 // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See
108 // https://crbug.com/674665.
109 const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
110 struct rlimit limit;
111 if (getrlimit(RLIMIT_DATA, &limit) != 0) {
112 return false;
113 }
114 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
115 limit.rlim_cur = kAddressSpaceLimit;
116 if (setrlimit(RLIMIT_DATA, &limit) != 0) {
117 return false;
118 }
119 }
120 return true;
121 #else
122 return false;
123 #endif
124 }
125
126 bool ClearAddressSpaceLimit() {
127 #if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
128 return true;
129 #elif BUILDFLAG(IS_POSIX)
130 struct rlimit limit;
131 if (getrlimit(RLIMIT_DATA, &limit) != 0) {
132 return false;
133 }
134 limit.rlim_cur = limit.rlim_max;
135 if (setrlimit(RLIMIT_DATA, &limit) != 0) {
136 return false;
137 }
138 return true;
139 #else
140 return false;
141 #endif
142 }
143
144 const size_t kTestSizes[] = {
145 1,
146 17,
147 100,
148 partition_alloc::internal::SystemPageSize(),
149 partition_alloc::internal::SystemPageSize() + 1,
150 partition_alloc::PartitionRoot<
151 partition_alloc::internal::ThreadSafe>::GetDirectMapSlotSize(100),
152 1 << 20,
153 1 << 21,
154 };
155 constexpr size_t kTestSizesCount = std::size(kTestSizes);
156
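// Makes |count| allocations with sizes drawn at random from kTestSizes, then
// frees them all.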
157 void AllocateRandomly(
158 partition_alloc::PartitionRoot<partition_alloc::internal::ThreadSafe>* root,
159 size_t count,
160 unsigned int flags) {
161 std::vector<void*> allocations(count, nullptr);
162 for (size_t i = 0; i < count; ++i) {
163 const size_t size =
164 kTestSizes[partition_alloc::internal::base::RandGenerator(
165 kTestSizesCount)];
166 allocations[i] = root->AllocWithFlags(flags, size, nullptr);
167 EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
168 }
169
170 for (size_t i = 0; i < count; ++i) {
171 if (allocations[i]) {
172 root->Free(allocations[i]);
173 }
174 }
175 }
176
177 void HandleOOM(size_t unused_size) {
178 PA_LOG(FATAL) << "Out of memory";
179 }
180
181 int g_dangling_raw_ptr_detected_count = 0;
182 int g_dangling_raw_ptr_released_count = 0;
183
184 class CountDanglingRawPtr {
185 public:
186 CountDanglingRawPtr() {
187 g_dangling_raw_ptr_detected_count = 0;
188 g_dangling_raw_ptr_released_count = 0;
189 old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
190 old_released_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
191
192 partition_alloc::SetDanglingRawPtrDetectedFn(
193 CountDanglingRawPtr::DanglingRawPtrDetected);
194 partition_alloc::SetDanglingRawPtrReleasedFn(
195 CountDanglingRawPtr::DanglingRawPtrReleased);
196 }
197 ~CountDanglingRawPtr() {
198 partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
199 partition_alloc::SetDanglingRawPtrReleasedFn(old_released_fn_);
200 }
201
202 private:
203 static void DanglingRawPtrDetected(uintptr_t) {
204 g_dangling_raw_ptr_detected_count++;
205 }
206 static void DanglingRawPtrReleased(uintptr_t) {
207 g_dangling_raw_ptr_released_count++;
208 }
209
210 partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
211 partition_alloc::DanglingRawPtrReleasedFn* old_released_fn_;
212 };
213
214 } // namespace
215
216 // Note: This test exercises interfaces inside the `partition_alloc`
217 // namespace, but inspects objects inside `partition_alloc::internal`.
218 // For ease of reading, the tests are placed into the latter namespace.
219 namespace partition_alloc::internal {
220
221 using BucketDistribution = ThreadSafePartitionRoot::BucketDistribution;
222 using SlotSpan = SlotSpanMetadata<ThreadSafe>;
223
224 const size_t kTestAllocSize = 16;
225
226 #if !BUILDFLAG(PA_DCHECK_IS_ON)
227 const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
228 const size_t kExtraAllocSizeWithoutRefCount = 0ull;
229 #else
230 const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
231 const size_t kExtraAllocSizeWithoutRefCount = kCookieSize;
232 #endif
233
234 const size_t kExtraAllocSizeWithRefCount =
235 kExtraAllocSizeWithoutRefCount + kInSlotRefCountBufferSize;
236
237 const char* type_name = nullptr;
238
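// Puts |root| into the bucket distribution under test (default or denser).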
239 void SetDistributionForPartitionRoot(ThreadSafePartitionRoot* root,
240 BucketDistribution distribution) {
241 switch (distribution) {
242 case BucketDistribution::kDefault:
243 root->ResetBucketDistributionForTesting();
244 break;
245 case BucketDistribution::kDenser:
246 root->SwitchToDenserBucketDistribution();
247 break;
248 }
249 }
250
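// Extra bytes added to each allocation by |allocator|: the debug cookie (when
// enabled) plus the in-slot ref-count when BRP is on.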
251 size_t ExtraAllocSize(
252 const PartitionAllocator<internal::ThreadSafe>& allocator) {
253 return kExtraAllocSizeWithoutRefCount +
254 (allocator.root()->brp_enabled() ? kInSlotRefCountBufferSize : 0);
255 }
256
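// RAII helper that allocates |npages| system pages' worth of memory (minus
// allocation extras) and frees it on destruction.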
257 class ScopedPageAllocation {
258 public:
259 ScopedPageAllocation(PartitionAllocator<internal::ThreadSafe>& allocator,
260 base::CheckedNumeric<size_t> npages)
261 : allocator_(allocator),
262 npages_(npages),
263 ptr_(static_cast<char*>(allocator_.root()->Alloc(
264 (npages * SystemPageSize() - ExtraAllocSize(allocator_))
265 .ValueOrDie(),
266 type_name))) {}
267
268 ~ScopedPageAllocation() { allocator_.root()->Free(ptr_); }
269
270 void TouchAllPages() {
271 memset(ptr_, 'A',
272 ((npages_ * SystemPageSize()) - ExtraAllocSize(allocator_))
273 .ValueOrDie());
274 }
275
276 void* PageAtIndex(size_t index) {
277 return ptr_ - kPointerOffset + (SystemPageSize() * index);
278 }
279
280 private:
281 PartitionAllocator<internal::ThreadSafe>& allocator_;
282 const base::CheckedNumeric<size_t> npages_;
283 char* ptr_;
284 };
285
286 struct PartitionAllocTestParam {
287 PartitionAllocTestParam(BucketDistribution bucket_distribution,
288 bool use_pkey_pool)
289 : bucket_distribution(bucket_distribution),
290 use_pkey_pool(use_pkey_pool) {}
291 BucketDistribution bucket_distribution;
292 bool use_pkey_pool;
293 };
294
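// Every test runs under both bucket distributions, and additionally against
// the pkey pool when the CPU supports pkeys.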
295 const std::vector<PartitionAllocTestParam> GetPartitionAllocTestParams() {
296 std::vector<PartitionAllocTestParam> params;
297 params.emplace_back(BucketDistribution::kDefault, false);
298 params.emplace_back(BucketDistribution::kDenser, false);
299 #if BUILDFLAG(ENABLE_PKEYS)
300 if (CPUHasPkeySupport()) {
301 params.emplace_back(BucketDistribution::kDefault, true);
302 params.emplace_back(BucketDistribution::kDenser, true);
303 }
304 #endif
305 return params;
306 }
307
308 class PartitionAllocTest
309 : public testing::TestWithParam<PartitionAllocTestParam> {
310 protected:
311 PartitionAllocTest() = default;
312
313 ~PartitionAllocTest() override = default;
314
315 void InitializeAllocator() {
316 #if BUILDFLAG(ENABLE_PKEYS)
317 int pkey = PkeyAlloc(UsePkeyPool() ? 0 : PKEY_DISABLE_WRITE);
318 if (pkey != -1) {
319 pkey_ = pkey;
320 }
321 // We always want to have a pkey allocator initialized to make sure that the
322 // other pools still work. As part of the initialization, we tag some memory
323 // with the new pkey, effectively making it read-only. So there's some
324 // potential for breakage that this should catch.
325 pkey_allocator.init({
326 partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
327 partition_alloc::PartitionOptions::ThreadCache::kDisabled,
328 partition_alloc::PartitionOptions::Quarantine::kDisallowed,
329 partition_alloc::PartitionOptions::Cookie::kAllowed,
330 partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
331 partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
332 partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
333 partition_alloc::PartitionOptions::AddDummyRefCount::kDisabled,
334 pkey_ != kInvalidPkey ? pkey_ : kDefaultPkey,
335 });
336 if (UsePkeyPool() && pkey_ != kInvalidPkey) {
337 allocator.init({
338 partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
339 partition_alloc::PartitionOptions::ThreadCache::kDisabled,
340 partition_alloc::PartitionOptions::Quarantine::kDisallowed,
341 partition_alloc::PartitionOptions::Cookie::kAllowed,
342 partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
343 partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
344 partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
345 partition_alloc::PartitionOptions::AddDummyRefCount::kDisabled,
346 pkey_,
347 });
348 return;
349 }
350 #endif
351 allocator.init({
352 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
353 BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
354 // AlignedAllocWithFlags() can't be called when BRP is in the "before
355 // allocation" mode, because this mode adds extras before the allocation.
356 // Extras after the allocation are ok.
357 PartitionOptions::AlignedAlloc::kAllowed,
358 #else
359 PartitionOptions::AlignedAlloc::kDisallowed,
360 #endif
361 PartitionOptions::ThreadCache::kDisabled,
362 PartitionOptions::Quarantine::kDisallowed,
363 PartitionOptions::Cookie::kAllowed,
364 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
365 PartitionOptions::BackupRefPtr::kEnabled,
366 PartitionOptions::BackupRefPtrZapping::kEnabled,
367 #else
368 PartitionOptions::BackupRefPtr::kDisabled,
369 PartitionOptions::BackupRefPtrZapping::kDisabled,
370 #endif
371 PartitionOptions::UseConfigurablePool::kNo,
372 });
373 }
374
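// Slot size used for kTestAllocSize once allocation extras and kAlignment
// rounding are applied.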
375 size_t RealAllocSize() const {
376 return partition_alloc::internal::base::bits::AlignUp(
377 kTestAllocSize + ExtraAllocSize(allocator), kAlignment);
378 }
379
380 void SetUp() override {
381 PartitionRoot<ThreadSafe>::EnableSortActiveSlotSpans();
382 PartitionAllocGlobalInit(HandleOOM);
383 InitializeAllocator();
384
385 aligned_allocator.init({
386 PartitionOptions::AlignedAlloc::kAllowed,
387 PartitionOptions::ThreadCache::kDisabled,
388 PartitionOptions::Quarantine::kDisallowed,
389 PartitionOptions::Cookie::kDisallowed,
390 PartitionOptions::BackupRefPtr::kDisabled,
391 PartitionOptions::BackupRefPtrZapping::kDisabled,
392 PartitionOptions::UseConfigurablePool::kNo,
393 });
394 test_bucket_index_ = SizeToIndex(RealAllocSize());
395 allocator.root()->UncapEmptySlotSpanMemoryForTesting();
396 aligned_allocator.root()->UncapEmptySlotSpanMemoryForTesting();
397
398 SetDistributionForPartitionRoot(allocator.root(), GetBucketDistribution());
399 SetDistributionForPartitionRoot(aligned_allocator.root(),
400 GetBucketDistribution());
401 }
402
403 size_t SizeToIndex(size_t size) {
404 const auto distribution_to_use = GetBucketDistribution();
405 return PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
406 size, distribution_to_use);
407 }
408
409 size_t SizeToBucketSize(size_t size) {
410 const auto index = SizeToIndex(size);
411 return allocator.root()->buckets[index].slot_size;
412 }
413
414 void TearDown() override {
415 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
416 PurgeFlags::kDiscardUnusedSystemPages);
417 PartitionAllocGlobalUninitForTesting();
418 #if BUILDFLAG(ENABLE_PKEYS)
419 if (pkey_ != kInvalidPkey) {
420 PkeyFree(pkey_);
421 }
422 #endif
423 }
424
425 size_t GetNumPagesPerSlotSpan(size_t size) {
426 size_t real_size = size + ExtraAllocSize(allocator);
427 size_t bucket_index = SizeToIndex(real_size);
428 PartitionRoot<ThreadSafe>::Bucket* bucket =
429 &allocator.root()->buckets[bucket_index];
430 // TODO(tasak): make get_pages_per_slot_span() available in
431 // partition_alloc_unittest.cc. Is it allowable to move the code from
432 // partition_bucket.cc to partition_bucket.h?
433 return (bucket->num_system_pages_per_slot_span +
434 (NumSystemPagesPerPartitionPage() - 1)) /
435 NumSystemPagesPerPartitionPage();
436 }
437
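// Allocates objects of |size| until the bucket's active slot span is full,
// then returns that span.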
438 SlotSpan* GetFullSlotSpan(size_t size) {
439 size_t real_size = size + ExtraAllocSize(allocator);
440 size_t bucket_index = SizeToIndex(real_size);
441 PartitionRoot<ThreadSafe>::Bucket* bucket =
442 &allocator.root()->buckets[bucket_index];
443 size_t num_slots =
444 (bucket->num_system_pages_per_slot_span * SystemPageSize()) /
445 bucket->slot_size;
446 uintptr_t first = 0;
447 uintptr_t last = 0;
448 size_t i;
449 for (i = 0; i < num_slots; ++i) {
450 void* ptr = allocator.root()->Alloc(size, type_name);
451 EXPECT_TRUE(ptr);
452 if (!i) {
453 first = allocator.root()->ObjectToSlotStart(ptr);
454 } else if (i == num_slots - 1) {
455 last = allocator.root()->ObjectToSlotStart(ptr);
456 }
457 }
458 EXPECT_EQ(SlotSpan::FromSlotStart(first), SlotSpan::FromSlotStart(last));
459 if (bucket->num_system_pages_per_slot_span ==
460 NumSystemPagesPerPartitionPage()) {
461 EXPECT_EQ(first & PartitionPageBaseMask(),
462 last & PartitionPageBaseMask());
463 }
464 EXPECT_EQ(num_slots, bucket->active_slot_spans_head->num_allocated_slots);
465 EXPECT_EQ(nullptr, bucket->active_slot_spans_head->get_freelist_head());
466 EXPECT_TRUE(bucket->is_valid());
467 EXPECT_TRUE(bucket->active_slot_spans_head !=
468 SlotSpan::get_sentinel_slot_span());
469 EXPECT_TRUE(bucket->active_slot_spans_head->is_full());
470 return bucket->active_slot_spans_head;
471 }
472
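// Allocates and frees one object of |size| kMaxFreeableSpans times, cycling
// the bucket's slot span through the empty-span cache.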
473 void CycleFreeCache(size_t size) {
474 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
475 void* ptr = allocator.root()->Alloc(size, type_name);
476 auto* slot_span =
477 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
478 auto* bucket = slot_span->bucket;
479 EXPECT_EQ(1u, bucket->active_slot_spans_head->num_allocated_slots);
480 allocator.root()->Free(ptr);
481 EXPECT_EQ(0u, bucket->active_slot_spans_head->num_allocated_slots);
482 EXPECT_TRUE(bucket->active_slot_spans_head->in_empty_cache() ||
483 bucket->active_slot_spans_head ==
484 SlotSpanMetadata<ThreadSafe>::get_sentinel_slot_span());
485 }
486 }
487
488 enum ReturnNullTestMode {
489 kPartitionAllocWithFlags,
490 kPartitionReallocWithFlags,
491 kPartitionRootTryRealloc,
492 };
493
494 void DoReturnNullTest(size_t alloc_size, ReturnNullTestMode mode) {
495 // TODO(crbug.com/678782): Where necessary and possible, disable the
496 // platform's OOM-killing behavior. OOM-killing makes this test flaky on
497 // low-memory devices.
498 if (!IsLargeMemoryDevice()) {
499 PA_LOG(WARNING)
500 << "Skipping test on this device because of crbug.com/678782";
501 PA_LOG(FATAL) << "Passed DoReturnNullTest";
502 }
503
504 ASSERT_TRUE(SetAddressSpaceLimit());
505
506 // Work out the number of allocations for 6 GB of memory.
507 const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);
508
509 void** ptrs = static_cast<void**>(
510 allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
511 int i;
512
513 for (i = 0; i < num_allocations; ++i) {
514 switch (mode) {
515 case kPartitionAllocWithFlags: {
516 ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
517 alloc_size, type_name);
518 break;
519 }
520 case kPartitionReallocWithFlags: {
521 ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull, 1,
522 type_name);
523 ptrs[i] = allocator.root()->ReallocWithFlags(
524 AllocFlags::kReturnNull, ptrs[i], alloc_size, type_name);
525 break;
526 }
527 case kPartitionRootTryRealloc: {
528 ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull, 1,
529 type_name);
530 ptrs[i] =
531 allocator.root()->TryRealloc(ptrs[i], alloc_size, type_name);
532 }
533 }
534
535 if (!i) {
536 EXPECT_TRUE(ptrs[0]);
537 }
538 if (!ptrs[i]) {
539 ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
540 alloc_size, type_name);
541 EXPECT_FALSE(ptrs[i]);
542 break;
543 }
544 }
545
546 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
547 // we're not actually testing anything here.
548 EXPECT_LT(i, num_allocations);
549
550 // Free, reallocate and free again each block we allocated. We do this to
551 // check that freeing memory also works correctly after a failed allocation.
552 for (--i; i >= 0; --i) {
553 allocator.root()->Free(ptrs[i]);
554 ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
555 alloc_size, type_name);
556 EXPECT_TRUE(ptrs[i]);
557 allocator.root()->Free(ptrs[i]);
558 }
559
560 allocator.root()->Free(ptrs);
561
562 EXPECT_TRUE(ClearAddressSpaceLimit());
563 PA_LOG(FATAL) << "Passed DoReturnNullTest";
564 }
565
566 void RunRefCountReallocSubtest(size_t orig_size, size_t new_size);
567
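// Non-inlined wrappers around allocator.root()->Alloc() and Free().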
568 PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t size) {
569 return allocator.root()->Alloc(size, "");
570 }
571
572 PA_NOINLINE void Free(void* ptr) { allocator.root()->Free(ptr); }
573
574 BucketDistribution GetBucketDistribution() const {
575 return GetParam().bucket_distribution;
576 }
577
578 bool UsePkeyPool() const { return GetParam().use_pkey_pool; }
579 bool UseBRPPool() const { return allocator.root()->brp_enabled(); }
580
581 partition_alloc::PartitionAllocatorForTesting allocator;
582 partition_alloc::PartitionAllocatorForTesting aligned_allocator;
583 #if BUILDFLAG(ENABLE_PKEYS)
584 partition_alloc::PartitionAllocatorForTesting pkey_allocator;
585 #endif
586 size_t test_bucket_index_;
587
588 #if BUILDFLAG(ENABLE_PKEYS)
589 int pkey_ = kInvalidPkey;
590 #endif
591 };
592
593 // Death tests misbehave on Android, http://crbug.com/643760.
594 #if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
595 #define PA_HAS_DEATH_TESTS
596
597 class PartitionAllocDeathTest : public PartitionAllocTest {};
598
599 INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
600 PartitionAllocDeathTest,
601 testing::ValuesIn(GetPartitionAllocTestParams()));
602
603 #endif
604
605 namespace {
606
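// Frees every slot in a full |slot_span|; the span must be empty afterwards.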
607 void FreeFullSlotSpan(PartitionRoot<internal::ThreadSafe>* root,
608 SlotSpan* slot_span) {
609 EXPECT_TRUE(slot_span->is_full());
610 size_t size = slot_span->bucket->slot_size;
611 size_t num_slots =
612 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
613 size;
614 EXPECT_EQ(num_slots, slot_span->num_allocated_slots);
615 uintptr_t address = SlotSpan::ToSlotSpanStart(slot_span);
616 size_t i;
617 for (i = 0; i < num_slots; ++i) {
618 root->Free(root->SlotStartToObject(address));
619 address += size;
620 }
621 EXPECT_TRUE(slot_span->is_empty());
622 }
623
624 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
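// Returns whether the residency of the system page at |ptr|, as reported by
// mincore(), matches |in_core|.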
625 bool CheckPageInCore(void* ptr, bool in_core) {
626 unsigned char ret = 0;
627 EXPECT_EQ(0, mincore(ptr, SystemPageSize(), &ret));
628 return in_core == (ret & 1);
629 }
630
631 #define CHECK_PAGE_IN_CORE(ptr, in_core) \
632 EXPECT_TRUE(CheckPageInCore(ptr, in_core))
633 #else
634 #define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
635 #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
636
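// Stats dumper that records per-bucket stats and checks that the dumped
// totals match the sums accumulated from the buckets.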
637 class MockPartitionStatsDumper : public PartitionStatsDumper {
638 public:
639 MockPartitionStatsDumper() = default;
640
641 void PartitionDumpTotals(const char* partition_name,
642 const PartitionMemoryStats* stats) override {
643 EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
644 EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
645 EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
646 EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
647 EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
648 }
649
650 void PartitionsDumpBucketStats(
651 [[maybe_unused]] const char* partition_name,
652 const PartitionBucketMemoryStats* stats) override {
653 EXPECT_TRUE(stats->is_valid);
654 EXPECT_EQ(0u, stats->bucket_slot_size & sizeof(void*));
655 bucket_stats.push_back(*stats);
656 total_resident_bytes += stats->resident_bytes;
657 total_active_bytes += stats->active_bytes;
658 total_decommittable_bytes += stats->decommittable_bytes;
659 total_discardable_bytes += stats->discardable_bytes;
660 }
661
662 bool IsMemoryAllocationRecorded() {
663 return total_resident_bytes != 0 && total_active_bytes != 0;
664 }
665
666 const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
667 for (auto& stat : bucket_stats) {
668 if (stat.bucket_slot_size == bucket_size) {
669 return &stat;
670 }
671 }
672 return nullptr;
673 }
674
675 private:
676 size_t total_resident_bytes = 0;
677 size_t total_active_bytes = 0;
678 size_t total_decommittable_bytes = 0;
679 size_t total_discardable_bytes = 0;
680
681 std::vector<PartitionBucketMemoryStats> bucket_stats;
682 };
683
684 } // namespace
685
686 INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
687 PartitionAllocTest,
688 testing::ValuesIn(GetPartitionAllocTestParams()));
689
690 // Check that the most basic of allocate / free pairs work.
691 TEST_P(PartitionAllocTest, Basic) {
692 PartitionRoot<ThreadSafe>::Bucket* bucket =
693 &allocator.root()->buckets[test_bucket_index_];
694 auto* seed_slot_span = SlotSpan::get_sentinel_slot_span();
695
696 EXPECT_FALSE(bucket->empty_slot_spans_head);
697 EXPECT_FALSE(bucket->decommitted_slot_spans_head);
698 EXPECT_EQ(seed_slot_span, bucket->active_slot_spans_head);
699 EXPECT_EQ(nullptr, bucket->active_slot_spans_head->next_slot_span);
700
701 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
702 EXPECT_TRUE(ptr);
703 EXPECT_EQ(kPointerOffset, UntagPtr(ptr) & PartitionPageOffsetMask());
704 // Check that the offset appears to include a guard page.
705 EXPECT_EQ(PartitionPageSize() +
706 partition_alloc::internal::ReservedFreeSlotBitmapSize() +
707 kPointerOffset,
708 UntagPtr(ptr) & kSuperPageOffsetMask);
709
710 allocator.root()->Free(ptr);
711 // Expect that the last active slot span gets noticed as empty but doesn't get
712 // decommitted.
713 EXPECT_TRUE(bucket->empty_slot_spans_head);
714 EXPECT_FALSE(bucket->decommitted_slot_spans_head);
715 }
716
717 // Test multiple allocations, and freelist handling.
718 TEST_P(PartitionAllocTest, MultiAlloc) {
719 void* ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
720 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
721 EXPECT_TRUE(ptr1);
722 EXPECT_TRUE(ptr2);
723 ptrdiff_t diff = UntagPtr(ptr2) - UntagPtr(ptr1);
724 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
725
726 // Check that we re-use the just-freed slot.
727 allocator.root()->Free(ptr2);
728 ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
729 EXPECT_TRUE(ptr2);
730 diff = UntagPtr(ptr2) - UntagPtr(ptr1);
731 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
732 allocator.root()->Free(ptr1);
733 ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
734 EXPECT_TRUE(ptr1);
735 diff = UntagPtr(ptr2) - UntagPtr(ptr1);
736 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
737
738 void* ptr3 = allocator.root()->Alloc(kTestAllocSize, type_name);
739 EXPECT_TRUE(ptr3);
740 diff = UntagPtr(ptr3) - UntagPtr(ptr1);
741 EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize() * 2), diff);
742
743 allocator.root()->Free(ptr1);
744 allocator.root()->Free(ptr2);
745 allocator.root()->Free(ptr3);
746 }
747
748 // Test a bucket with multiple slot spans.
749 TEST_P(PartitionAllocTest, MultiSlotSpans) {
750 PartitionRoot<ThreadSafe>::Bucket* bucket =
751 &allocator.root()->buckets[test_bucket_index_];
752
753 auto* slot_span = GetFullSlotSpan(kTestAllocSize);
754 FreeFullSlotSpan(allocator.root(), slot_span);
755 EXPECT_TRUE(bucket->empty_slot_spans_head);
756 EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
757 EXPECT_EQ(nullptr, slot_span->next_slot_span);
758 EXPECT_EQ(0u, slot_span->num_allocated_slots);
759
760 slot_span = GetFullSlotSpan(kTestAllocSize);
761 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
762
763 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
764 EXPECT_EQ(nullptr, slot_span2->next_slot_span);
765 EXPECT_EQ(SlotSpan::ToSlotSpanStart(slot_span) & kSuperPageBaseMask,
766 SlotSpan::ToSlotSpanStart(slot_span2) & kSuperPageBaseMask);
767
768 // Fully free the non-current slot span. This will leave us with no current
769 // active slot span because one is empty and the other is full.
770 FreeFullSlotSpan(allocator.root(), slot_span);
771 EXPECT_EQ(0u, slot_span->num_allocated_slots);
772 EXPECT_TRUE(bucket->empty_slot_spans_head);
773 EXPECT_EQ(SlotSpanMetadata<ThreadSafe>::get_sentinel_slot_span(),
774 bucket->active_slot_spans_head);
775
776 // Allocate a new slot span, it should pull from the freelist.
777 slot_span = GetFullSlotSpan(kTestAllocSize);
778 EXPECT_FALSE(bucket->empty_slot_spans_head);
779 EXPECT_EQ(slot_span, bucket->active_slot_spans_head);
780
781 FreeFullSlotSpan(allocator.root(), slot_span);
782 FreeFullSlotSpan(allocator.root(), slot_span2);
783 EXPECT_EQ(0u, slot_span->num_allocated_slots);
784 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
785 EXPECT_EQ(0u, slot_span2->num_unprovisioned_slots);
786 EXPECT_TRUE(slot_span2->in_empty_cache());
787 }
788
789 // Test some finer aspects of internal slot span transitions.
790 TEST_P(PartitionAllocTest, SlotSpanTransitions) {
791 PartitionRoot<ThreadSafe>::Bucket* bucket =
792 &allocator.root()->buckets[test_bucket_index_];
793
794 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
795 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
796 EXPECT_EQ(nullptr, slot_span1->next_slot_span);
797 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
798 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
799 EXPECT_EQ(nullptr, slot_span2->next_slot_span);
800
801 // Bounce slot_span1 back into the non-full list then fill it up again.
802 void* ptr = allocator.root()->SlotStartToObject(
803 SlotSpan::ToSlotSpanStart(slot_span1));
804 allocator.root()->Free(ptr);
805 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
806 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
807 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
808 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head->next_slot_span);
809
810 // Allocating another slot span at this point should cause us to scan over
811 // slot_span1 (which is both full and NOT our current slot span), and evict it
812 // from the freelist. Older code had an O(n^2) condition due to failure to do
813 // this.
814 auto* slot_span3 = GetFullSlotSpan(kTestAllocSize);
815 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
816 EXPECT_EQ(nullptr, slot_span3->next_slot_span);
817
818 // Work out a pointer into slot_span2 and free it.
819 ptr = allocator.root()->SlotStartToObject(
820 SlotSpan::ToSlotSpanStart(slot_span2));
821 allocator.root()->Free(ptr);
822 // Trying to allocate at this time should cause us to cycle around to
823 // slot_span2 and find the recently freed slot.
824 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
825 PA_EXPECT_PTR_EQ(ptr, ptr2);
826 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
827 EXPECT_EQ(slot_span3, slot_span2->next_slot_span);
828
829 // Work out a pointer into slot_span1 and free it. This should pull the slot
830 // span back into the list of available slot spans.
831 ptr = allocator.root()->SlotStartToObject(
832 SlotSpan::ToSlotSpanStart(slot_span1));
833 allocator.root()->Free(ptr);
834 // This allocation should be satisfied by slot_span1.
835 ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
836 PA_EXPECT_PTR_EQ(ptr, ptr2);
837 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
838 EXPECT_EQ(slot_span2, slot_span1->next_slot_span);
839
840 FreeFullSlotSpan(allocator.root(), slot_span3);
841 FreeFullSlotSpan(allocator.root(), slot_span2);
842 FreeFullSlotSpan(allocator.root(), slot_span1);
843
844 // Allocating whilst in this state exposed a bug, so keep the test.
845 ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
846 allocator.root()->Free(ptr);
847 }
848
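// Checks that allocation prefers a slot span that already has a freelist
// entry over spans that merely have unprovisioned slots.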
849 TEST_P(PartitionAllocTest, PreferSlotSpansWithProvisionedEntries) {
850 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
851 size_t real_size = size + ExtraAllocSize(allocator);
852 size_t bucket_index =
853 allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
854 PartitionRoot<ThreadSafe>::Bucket* bucket =
855 &allocator.root()->buckets[bucket_index];
856 ASSERT_EQ(bucket->slot_size, real_size);
857 size_t slots_per_span = bucket->num_system_pages_per_slot_span;
858
859 // Make 10 full slot spans.
860 constexpr int kSpans = 10;
861 std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
862 for (int span_index = 0; span_index < kSpans; span_index++) {
863 for (size_t i = 0; i < slots_per_span; i++) {
864 allocated_memory_spans[span_index].push_back(
865 allocator.root()->Alloc(size, ""));
866 }
867 }
868
869 // Reverse ordering, since a newly non-full span is placed at the head of the
870 // active list.
871 for (int span_index = kSpans - 1; span_index >= 0; span_index--) {
872 allocator.root()->Free(allocated_memory_spans[span_index].back());
873 allocated_memory_spans[span_index].pop_back();
874 }
875
876 // Since slot spans are large enough and we freed memory from the end, the
877 // slot spans become partially provisioned after PurgeMemory().
878 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
879 PurgeFlags::kDiscardUnusedSystemPages);
880 std::vector<SlotSpanMetadata<ThreadSafe>*> active_slot_spans;
881 for (auto* span = bucket->active_slot_spans_head; span;
882 span = span->next_slot_span) {
883 active_slot_spans.push_back(span);
884 ASSERT_EQ(span->num_unprovisioned_slots, 1u);
885 // But no freelist entries.
886 ASSERT_FALSE(span->get_freelist_head());
887 }
888
889 // Free one entry in the middle span, creating a freelist entry.
890 constexpr size_t kSpanIndex = 5;
891 allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
892 allocated_memory_spans[kSpanIndex].pop_back();
893
894 ASSERT_TRUE(active_slot_spans[kSpanIndex]->get_freelist_head());
895 ASSERT_FALSE(bucket->active_slot_spans_head->get_freelist_head());
896
897 // It must come from the middle slot span even though the first one has
898 // unprovisioned space.
899 void* new_ptr = allocator.root()->Alloc(size, "");
900
901 // Comes from the middle slot span, since it has a freelist entry.
902 auto* new_active_slot_span = active_slot_spans[kSpanIndex];
903 ASSERT_FALSE(new_active_slot_span->get_freelist_head());
904
905 // The middle slot span was moved to the front.
906 active_slot_spans.erase(active_slot_spans.begin() + kSpanIndex);
907 active_slot_spans.insert(active_slot_spans.begin(), new_active_slot_span);
908
909 // Check slot span ordering.
910 int index = 0;
911 for (auto* span = bucket->active_slot_spans_head; span;
912 span = span->next_slot_span) {
913 EXPECT_EQ(span, active_slot_spans[index]);
914 index++;
915 }
916 EXPECT_EQ(index, kSpans);
917
918 allocator.root()->Free(new_ptr);
919 for (int span_index = 0; span_index < kSpans; span_index++) {
920 for (void* ptr : allocated_memory_spans[span_index]) {
921 allocator.root()->Free(ptr);
922 }
923 }
924 }
925
926 // Test some corner cases relating to slot span transitions in the internal
927 // free slot span list metadata bucket.
928 TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
929 PartitionRoot<ThreadSafe>::Bucket* bucket =
930 &allocator.root()->buckets[test_bucket_index_];
931
932 size_t num_to_fill_free_list_slot_span =
933 PartitionPageSize() / (sizeof(SlotSpan) + ExtraAllocSize(allocator));
934 // The +1 is because we need to account for the fact that the current slot
935 // span never gets thrown on the freelist.
936 ++num_to_fill_free_list_slot_span;
937 auto slot_spans =
938 std::make_unique<SlotSpan*[]>(num_to_fill_free_list_slot_span);
939
940 size_t i;
941 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
942 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
943 }
944 EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
945 bucket->active_slot_spans_head);
946 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
947 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
948 }
949 EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
950 EXPECT_TRUE(bucket->empty_slot_spans_head);
951
952 // Allocate / free in a different bucket size so we get control of a
953 // different free slot span list. We need two slot spans because one will be
954 // the last active slot span and not get freed.
955 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize * 2);
956 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize * 2);
957 FreeFullSlotSpan(allocator.root(), slot_span1);
958 FreeFullSlotSpan(allocator.root(), slot_span2);
959
960 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
961 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
962 }
963 EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
964 bucket->active_slot_spans_head);
965
966 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
967 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
968 }
969 EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
970 EXPECT_TRUE(bucket->empty_slot_spans_head);
971 }
972
973 // Test a large series of allocations that cross more than one underlying
974 // super page.
975 TEST_P(PartitionAllocTest, MultiPageAllocs) {
976 size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
977 // 1 super page has 2 guard partition pages and a tag bitmap.
978 size_t num_slot_spans_needed =
979 (NumPartitionPagesPerSuperPage() - 2 -
980 partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
981 num_pages_per_slot_span;
982
983 // We need one more slot span in order to cross super page boundary.
984 ++num_slot_spans_needed;
985
986 EXPECT_GT(num_slot_spans_needed, 1u);
987 auto slot_spans = std::make_unique<SlotSpan*[]>(num_slot_spans_needed);
988 uintptr_t first_super_page_base = 0;
989 size_t i;
990 for (i = 0; i < num_slot_spans_needed; ++i) {
991 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
992 uintptr_t slot_span_start = SlotSpan::ToSlotSpanStart(slot_spans[i]);
993 if (!i) {
994 first_super_page_base = slot_span_start & kSuperPageBaseMask;
995 }
996 if (i == num_slot_spans_needed - 1) {
997 uintptr_t second_super_page_base = slot_span_start & kSuperPageBaseMask;
998 uintptr_t second_super_page_offset =
999 slot_span_start & kSuperPageOffsetMask;
1000 EXPECT_FALSE(second_super_page_base == first_super_page_base);
1001 // Check that we allocated a guard page and the reserved tag bitmap for
1002 // the second super page.
1003 EXPECT_EQ(PartitionPageSize() +
1004 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
1005 second_super_page_offset);
1006 }
1007 }
1008 for (i = 0; i < num_slot_spans_needed; ++i) {
1009 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
1010 }
1011 }
1012
1013 // Test the generic allocation functions that can handle arbitrary sizes and
1014 // reallocing etc.
1015 TEST_P(PartitionAllocTest, Alloc) {
1016 void* ptr = allocator.root()->Alloc(1, type_name);
1017 EXPECT_TRUE(ptr);
1018 allocator.root()->Free(ptr);
1019 ptr = allocator.root()->Alloc(kMaxBucketed + 1, type_name);
1020 EXPECT_TRUE(ptr);
1021 allocator.root()->Free(ptr);
1022
1023 // To make both alloc(x + 1) and alloc(x + kSmallestBucket) allocate from
1024 // the same bucket, partition_alloc::internal::base::bits::AlignUp(1 + x +
1025 // ExtraAllocSize(allocator), kAlignment)
1026 // == partition_alloc::internal::base::bits::AlignUp(kSmallestBucket + x +
1027 // ExtraAllocSize(allocator), kAlignment), because slot_size is a multiple of
1028 // kAlignment. So (x + ExtraAllocSize(allocator)) must be a multiple of
1029 // kAlignment. x =
1030 // partition_alloc::internal::base::bits::AlignUp(ExtraAllocSize(allocator),
1031 // kAlignment) - ExtraAllocSize(allocator);
1032 size_t base_size = partition_alloc::internal::base::bits::AlignUp(
1033 ExtraAllocSize(allocator), kAlignment) -
1034 ExtraAllocSize(allocator);
1035 ptr = allocator.root()->Alloc(base_size + 1, type_name);
1036 EXPECT_TRUE(ptr);
1037 void* orig_ptr = ptr;
1038 char* char_ptr = static_cast<char*>(ptr);
1039 *char_ptr = 'A';
1040
1041 // Change the size of the realloc, remaining inside the same bucket.
1042 void* new_ptr = allocator.root()->Realloc(ptr, base_size + 2, type_name);
1043 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1044 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1045 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1046 new_ptr =
1047 allocator.root()->Realloc(ptr, base_size + kSmallestBucket, type_name);
1048 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1049
1050 // Change the size of the realloc, switching buckets.
1051 new_ptr = allocator.root()->Realloc(ptr, base_size + kSmallestBucket + 1,
1052 type_name);
1053 PA_EXPECT_PTR_NE(new_ptr, ptr);
1054 // Check that the realloc copied correctly.
1055 char* new_char_ptr = static_cast<char*>(new_ptr);
1056 EXPECT_EQ(*new_char_ptr, 'A');
1057 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1058 // Subtle: this checks for an old bug where we copied too much from the
1059 // source of the realloc. The condition can be detected by a trashing of
1060 // the uninitialized value in the space of the upsized allocation.
1061 EXPECT_EQ(kUninitializedByte,
1062 static_cast<unsigned char>(*(new_char_ptr + kSmallestBucket)));
1063 #endif
1064 *new_char_ptr = 'B';
1065 // The realloc moved. To check that the old allocation was freed, we can
1066 // do an alloc of the old allocation size and check that the old allocation
1067 // address is at the head of the freelist and reused.
1068 void* reused_ptr = allocator.root()->Alloc(base_size + 1, type_name);
1069 PA_EXPECT_PTR_EQ(reused_ptr, orig_ptr);
1070 allocator.root()->Free(reused_ptr);
1071
1072 // Downsize the realloc.
1073 ptr = new_ptr;
1074 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1075 PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
1076 new_char_ptr = static_cast<char*>(new_ptr);
1077 EXPECT_EQ(*new_char_ptr, 'B');
1078 *new_char_ptr = 'C';
1079
1080 // Upsize the realloc to outside the partition.
1081 ptr = new_ptr;
1082 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed + 1, type_name);
1083 PA_EXPECT_PTR_NE(new_ptr, ptr);
1084 new_char_ptr = static_cast<char*>(new_ptr);
1085 EXPECT_EQ(*new_char_ptr, 'C');
1086 *new_char_ptr = 'D';
1087
1088 // Upsize and downsize the realloc, remaining outside the partition.
1089 ptr = new_ptr;
1090 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 10, type_name);
1091 new_char_ptr = static_cast<char*>(new_ptr);
1092 EXPECT_EQ(*new_char_ptr, 'D');
1093 *new_char_ptr = 'E';
1094 ptr = new_ptr;
1095 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 2, type_name);
1096 new_char_ptr = static_cast<char*>(new_ptr);
1097 EXPECT_EQ(*new_char_ptr, 'E');
1098 *new_char_ptr = 'F';
1099
1100 // Downsize the realloc to inside the partition.
1101 ptr = new_ptr;
1102 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1103 PA_EXPECT_PTR_NE(new_ptr, ptr);
1104 PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
1105 new_char_ptr = static_cast<char*>(new_ptr);
1106 EXPECT_EQ(*new_char_ptr, 'F');
1107
1108 allocator.root()->Free(new_ptr);
1109 }
1110
1111 // Test the generic allocation functions can handle some specific sizes of
1112 // interest.
1113 TEST_P(PartitionAllocTest, AllocSizes) {
1114 {
1115 void* ptr = allocator.root()->Alloc(0, type_name);
1116 EXPECT_TRUE(ptr);
1117 allocator.root()->Free(ptr);
1118 }
1119
1120 {
1121 // PartitionPageSize() is interesting because it results in just one
1122 // allocation per page, which tripped up some corner cases.
1123 const size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
1124 void* ptr = allocator.root()->Alloc(size, type_name);
1125 EXPECT_TRUE(ptr);
1126 void* ptr2 = allocator.root()->Alloc(size, type_name);
1127 EXPECT_TRUE(ptr2);
1128 allocator.root()->Free(ptr);
1129 // Should be freeable at this point.
1130 auto* slot_span =
1131 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1132 EXPECT_TRUE(slot_span->in_empty_cache());
1133 allocator.root()->Free(ptr2);
1134 }
1135
1136 {
1137 // Single-slot slot span size.
1138 const size_t size =
1139 PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan + 1;
1140
1141 void* ptr = allocator.root()->Alloc(size, type_name);
1142 EXPECT_TRUE(ptr);
1143 memset(ptr, 'A', size);
1144 void* ptr2 = allocator.root()->Alloc(size, type_name);
1145 EXPECT_TRUE(ptr2);
1146 void* ptr3 = allocator.root()->Alloc(size, type_name);
1147 EXPECT_TRUE(ptr3);
1148 void* ptr4 = allocator.root()->Alloc(size, type_name);
1149 EXPECT_TRUE(ptr4);
1150
1151 auto* slot_span = SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
1152 allocator.root()->ObjectToSlotStart(ptr));
1153 auto* slot_span2 =
1154 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr3));
1155 EXPECT_NE(slot_span, slot_span2);
1156
1157 allocator.root()->Free(ptr);
1158 allocator.root()->Free(ptr3);
1159 allocator.root()->Free(ptr2);
1160 // Should be freeable at this point.
1161 EXPECT_TRUE(slot_span->in_empty_cache());
1162 EXPECT_EQ(0u, slot_span->num_allocated_slots);
1163 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
1164 void* new_ptr_1 = allocator.root()->Alloc(size, type_name);
1165 PA_EXPECT_PTR_EQ(ptr2, new_ptr_1);
1166 void* new_ptr_2 = allocator.root()->Alloc(size, type_name);
1167 PA_EXPECT_PTR_EQ(ptr3, new_ptr_2);
1168
1169 allocator.root()->Free(new_ptr_1);
1170 allocator.root()->Free(new_ptr_2);
1171 allocator.root()->Free(ptr4);
1172
1173 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1174 // |SlotSpanMetadata::Free| must poison the slot's contents with
1175 // |kFreedByte|.
1176 EXPECT_EQ(kFreedByte,
1177 *(static_cast<unsigned char*>(new_ptr_1) + (size - 1)));
1178 #endif
1179 }
1180
1181 // Can we allocate a massive (128MB) size?
1182 // Add +1, to test for cookie writing alignment issues.
1183 // Test this only if the device has enough memory or it might fail due
1184 // to OOM.
1185 if (IsLargeMemoryDevice()) {
1186 void* ptr = allocator.root()->Alloc(128 * 1024 * 1024 + 1, type_name);
1187 allocator.root()->Free(ptr);
1188 }
1189
1190 {
1191 // Check a more reasonable, but still direct mapped, size.
1192 // Chop a system page and a byte off to test for rounding errors.
1193 size_t size = 20 * 1024 * 1024;
1194 ASSERT_GT(size, kMaxBucketed);
1195 size -= SystemPageSize();
1196 size -= 1;
1197 void* ptr = allocator.root()->Alloc(size, type_name);
1198 char* char_ptr = static_cast<char*>(ptr);
1199 *(char_ptr + (size - 1)) = 'A';
1200 allocator.root()->Free(ptr);
1201
1202 // Can we free null?
1203 allocator.root()->Free(nullptr);
1204
1205 // Do we correctly get a null for a failed allocation?
1206 EXPECT_EQ(nullptr,
1207 allocator.root()->AllocWithFlags(
1208 AllocFlags::kReturnNull, 3u * 1024 * 1024 * 1024, type_name));
1209 }
1210 }
1211
1212 // Test that we can fetch the real allocated size after an allocation.
1213 TEST_P(PartitionAllocTest, AllocGetSizeAndStart) {
1214 void* ptr;
1215 size_t requested_size, actual_capacity, predicted_capacity;
1216
1217 // Allocate something small.
1218 requested_size = 511 - ExtraAllocSize(allocator);
1219 predicted_capacity =
1220 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1221 ptr = allocator.root()->Alloc(requested_size, type_name);
1222 EXPECT_TRUE(ptr);
1223 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1224 actual_capacity =
1225 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1226 EXPECT_EQ(predicted_capacity, actual_capacity);
1227 EXPECT_LT(requested_size, actual_capacity);
1228 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1229 if (UseBRPPool()) {
1230 uintptr_t address = UntagPtr(ptr);
1231 for (size_t offset = 0; offset < requested_size; ++offset) {
1232 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1233 slot_start);
1234 }
1235 }
1236 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1237 allocator.root()->Free(ptr);
1238
1239 // Allocate a size that should be a perfect match for a bucket, because it
1240 // is an exact power of 2.
1241 requested_size = (256 * 1024) - ExtraAllocSize(allocator);
1242 predicted_capacity =
1243 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1244 ptr = allocator.root()->Alloc(requested_size, type_name);
1245 EXPECT_TRUE(ptr);
1246 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1247 actual_capacity =
1248 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1249 EXPECT_EQ(predicted_capacity, actual_capacity);
1250 EXPECT_EQ(requested_size, actual_capacity);
1251 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1252 if (UseBRPPool()) {
1253 uintptr_t address = UntagPtr(ptr);
1254 for (size_t offset = 0; offset < requested_size; offset += 877) {
1255 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1256 slot_start);
1257 }
1258 }
1259 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1260 allocator.root()->Free(ptr);
1261
1262 // Allocate a size that is a system page smaller than a bucket.
1263 // AllocationCapacityFromSlotStart() should return a larger size than we asked
1264 // for now.
1265 size_t num = 64;
1266 while (num * SystemPageSize() >= 1024 * 1024) {
1267 num /= 2;
1268 }
1269 requested_size =
1270 num * SystemPageSize() - SystemPageSize() - ExtraAllocSize(allocator);
1271 predicted_capacity =
1272 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1273 ptr = allocator.root()->Alloc(requested_size, type_name);
1274 EXPECT_TRUE(ptr);
1275 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1276 actual_capacity =
1277 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1278 EXPECT_EQ(predicted_capacity, actual_capacity);
1279 EXPECT_EQ(requested_size + SystemPageSize(), actual_capacity);
1280 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1281 if (UseBRPPool()) {
1282 uintptr_t address = UntagPtr(ptr);
1283 for (size_t offset = 0; offset < requested_size; offset += 4999) {
1284 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1285 slot_start);
1286 }
1287 }
1288 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1289 allocator.root()->Free(ptr);
1290
1291 // Allocate the maximum allowed bucketed size.
1292 requested_size = kMaxBucketed - ExtraAllocSize(allocator);
1293 predicted_capacity =
1294 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1295 ptr = allocator.root()->Alloc(requested_size, type_name);
1296 EXPECT_TRUE(ptr);
1297 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1298 actual_capacity =
1299 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1300 EXPECT_EQ(predicted_capacity, actual_capacity);
1301 EXPECT_EQ(requested_size, actual_capacity);
1302 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1303 if (UseBRPPool()) {
1304 uintptr_t address = UntagPtr(ptr);
1305 for (size_t offset = 0; offset < requested_size; offset += 4999) {
1306 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1307 slot_start);
1308 }
1309 }
1310 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1311
1312 // Check that we can write at the end of the reported size too.
1313 char* char_ptr = static_cast<char*>(ptr);
1314 *(char_ptr + (actual_capacity - 1)) = 'A';
1315 allocator.root()->Free(ptr);
1316
1317 // Allocate something very large, and uneven.
1318 if (IsLargeMemoryDevice()) {
1319 requested_size = 128 * 1024 * 1024 - 33;
1320 predicted_capacity =
1321 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1322 ptr = allocator.root()->Alloc(requested_size, type_name);
1323 EXPECT_TRUE(ptr);
1324 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1325 actual_capacity =
1326 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1327 EXPECT_EQ(predicted_capacity, actual_capacity);
1328
1329 EXPECT_LT(requested_size, actual_capacity);
1330
1331 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1332 if (UseBRPPool()) {
1333 uintptr_t address = UntagPtr(ptr);
1334 for (size_t offset = 0; offset < requested_size; offset += 16111) {
1335 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1336 slot_start);
1337 }
1338 }
1339 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1340 allocator.root()->Free(ptr);
1341 }
1342
1343 // Too large allocation.
1344 requested_size = MaxDirectMapped() + 1;
1345 predicted_capacity =
1346 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1347 EXPECT_EQ(requested_size, predicted_capacity);
1348 }
1349
1350 #if PA_CONFIG(HAS_MEMORY_TAGGING)
1351 TEST_P(PartitionAllocTest, MTEProtectsFreedPtr) {
1352 // This test checks that Arm's memory tagging extension (MTE) is correctly
1353 // protecting freed pointers.
1354 base::CPU cpu;
1355 if (!cpu.has_mte()) {
1356 // This test won't pass without MTE support.
1357 GTEST_SKIP();
1358 }
1359
1360 // Create an arbitrarily-sized small allocation.
1361 size_t alloc_size = 64 - ExtraAllocSize(allocator);
1362 uint64_t* ptr1 =
1363 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1364 EXPECT_TRUE(ptr1);
1365
1366 // Invalidate the pointer by freeing it.
1367 allocator.root()->Free(ptr1);
1368
1369 // When we immediately reallocate a pointer, we should see the same allocation
1370 // slot but with a different tag (PA_EXPECT_PTR_EQ ignores the MTE tag).
1371 uint64_t* ptr2 =
1372 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1373 PA_EXPECT_PTR_EQ(ptr1, ptr2);
1374 // The different tag bits mean that ptr1 is not the same as ptr2.
1375 EXPECT_NE(ptr1, ptr2);
1376
1377 // When we free again, we expect a new tag for that area that's different from
1378 // ptr1 and ptr2.
1379 allocator.root()->Free(ptr2);
1380 uint64_t* ptr3 =
1381 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1382 PA_EXPECT_PTR_EQ(ptr2, ptr3);
1383 EXPECT_NE(ptr1, ptr3);
1384 EXPECT_NE(ptr2, ptr3);
1385
1386 // We don't check anything about ptr3, but we do clean it up to avoid DCHECKs.
1387 allocator.root()->Free(ptr3);
1388 }
1389 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
1390
1391 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
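// Exercises IsPtrWithinSameAlloc() with in-bounds, end-of-allocation and far
// out-of-bounds addresses across a range of allocation sizes.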
1392 TEST_P(PartitionAllocTest, IsPtrWithinSameAlloc) {
1393 if (!UseBRPPool()) {
1394 return;
1395 }
1396
1397 const size_t kMinReasonableTestSize =
1398 partition_alloc::internal::base::bits::AlignUp(
1399 ExtraAllocSize(allocator) + 1, kAlignment);
1400 ASSERT_GT(kMinReasonableTestSize, ExtraAllocSize(allocator));
1401 const size_t kSizes[] = {kMinReasonableTestSize,
1402 256,
1403 SystemPageSize(),
1404 PartitionPageSize(),
1405 MaxRegularSlotSpanSize(),
1406 MaxRegularSlotSpanSize() + 1,
1407 MaxRegularSlotSpanSize() + SystemPageSize(),
1408 MaxRegularSlotSpanSize() + PartitionPageSize(),
1409 kMaxBucketed,
1410 kMaxBucketed + 1,
1411 kMaxBucketed + SystemPageSize(),
1412 kMaxBucketed + PartitionPageSize(),
1413 kSuperPageSize};
1414 #if BUILDFLAG(HAS_64_BIT_POINTERS)
1415 constexpr size_t kFarFarAwayDelta = 512 * kGiB;
1416 #else
1417 constexpr size_t kFarFarAwayDelta = kGiB;
1418 #endif
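// A simplified mental model of the probes below (not an exhaustive spec of
// IsPtrWithinSameAlloc): anything within [address, address + requested_size]
// reports kInBounds, and anything clearly outside reports kFarOOB. When
// BACKUP_REF_PTR_POISON_OOB_PTR is enabled, probes so close to the end that
// an object of the given type size no longer fits before
// address + requested_size report kAllocEnd instead of kInBounds.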
1419 for (size_t size : kSizes) {
1420 size_t requested_size = size - ExtraAllocSize(allocator);
1421 // For regular slot-span allocations, confirm the size fills the entire
1422 // slot. Otherwise the test would be ineffective, as Partition Alloc has no
1423 // ability to check against the actual allocated size.
1424 // Single-slot slot-spans and direct map don't have that problem.
1425 if (size <= MaxRegularSlotSpanSize()) {
1426 ASSERT_EQ(requested_size,
1427 allocator.root()->AllocationCapacityFromRequestedSize(
1428 requested_size));
1429 }
1430
1431 constexpr size_t kNumRepeats = 3;
1432 void* ptrs[kNumRepeats];
1433 for (void*& ptr : ptrs) {
1434 ptr = allocator.root()->Alloc(requested_size, type_name);
1435 // Double check.
1436 if (size <= MaxRegularSlotSpanSize()) {
1437 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1438 EXPECT_EQ(
1439 requested_size,
1440 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1441 }
1442
1443 uintptr_t address = UntagPtr(ptr);
1444 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kFarFarAwayDelta, 0u),
1445 PtrPosWithinAlloc::kFarOOB);
1446 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kSuperPageSize, 0u),
1447 PtrPosWithinAlloc::kFarOOB);
1448 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - 1, 0u),
1449 PtrPosWithinAlloc::kFarOOB);
1450 EXPECT_EQ(IsPtrWithinSameAlloc(address, address, 0u),
1451 PtrPosWithinAlloc::kInBounds);
1452 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size / 2, 0u),
1453 PtrPosWithinAlloc::kInBounds);
1454 #if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1455 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 1, 1u),
1456 PtrPosWithinAlloc::kInBounds);
1457 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 1u),
1458 PtrPosWithinAlloc::kAllocEnd);
1459 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 4, 4u),
1460 PtrPosWithinAlloc::kInBounds);
1461 for (size_t subtrahend = 0; subtrahend < 4; subtrahend++) {
1462 EXPECT_EQ(IsPtrWithinSameAlloc(
1463 address, address + requested_size - subtrahend, 4u),
1464 PtrPosWithinAlloc::kAllocEnd);
1465 }
1466 #else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1467 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 0u),
1468 PtrPosWithinAlloc::kInBounds);
1469 #endif
1470 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size + 1, 0u),
1471 PtrPosWithinAlloc::kFarOOB);
1472 EXPECT_EQ(IsPtrWithinSameAlloc(
1473 address, address + requested_size + kSuperPageSize, 0u),
1474 PtrPosWithinAlloc::kFarOOB);
1475 EXPECT_EQ(IsPtrWithinSameAlloc(
1476 address, address + requested_size + kFarFarAwayDelta, 0u),
1477 PtrPosWithinAlloc::kFarOOB);
1478 EXPECT_EQ(
1479 IsPtrWithinSameAlloc(address + requested_size,
1480 address + requested_size + kFarFarAwayDelta, 0u),
1481 PtrPosWithinAlloc::kFarOOB);
1482 EXPECT_EQ(
1483 IsPtrWithinSameAlloc(address + requested_size,
1484 address + requested_size + kSuperPageSize, 0u),
1485 PtrPosWithinAlloc::kFarOOB);
1486 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1487 address + requested_size + 1, 0u),
1488 PtrPosWithinAlloc::kFarOOB);
1489 #if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1490 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
1491 address + requested_size - 1, 1u),
1492 PtrPosWithinAlloc::kInBounds);
1493 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
1494 address + requested_size, 1u),
1495 PtrPosWithinAlloc::kAllocEnd);
1496 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1497 address + requested_size, 1u),
1498 PtrPosWithinAlloc::kAllocEnd);
1499 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 4,
1500 address + requested_size - 4, 4u),
1501 PtrPosWithinAlloc::kInBounds);
1502 for (size_t addend = 1; addend < 4; addend++) {
1503 EXPECT_EQ(
1504 IsPtrWithinSameAlloc(address + requested_size - 4,
1505 address + requested_size - 4 + addend, 4u),
1506 PtrPosWithinAlloc::kAllocEnd);
1507 }
1508 #else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1509 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1510 address + requested_size, 0u),
1511 PtrPosWithinAlloc::kInBounds);
1512 #endif
1513 EXPECT_EQ(IsPtrWithinSameAlloc(
1514 address + requested_size,
1515 address + requested_size - (requested_size / 2), 0u),
1516 PtrPosWithinAlloc::kInBounds);
1517 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address, 0u),
1518 PtrPosWithinAlloc::kInBounds);
1519 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address - 1, 0u),
1520 PtrPosWithinAlloc::kFarOOB);
1521 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1522 address - kSuperPageSize, 0u),
1523 PtrPosWithinAlloc::kFarOOB);
1524 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1525 address - kFarFarAwayDelta, 0u),
1526 PtrPosWithinAlloc::kFarOOB);
1527 }
1528
1529 for (void* ptr : ptrs) {
1530 allocator.root()->Free(ptr);
1531 }
1532 }
1533 }
1534
1535 TEST_P(PartitionAllocTest, GetSlotStartMultiplePages) {
1536 if (!UseBRPPool()) {
1537 return;
1538 }
1539
1540 auto* root = allocator.root();
1541 // Find the smallest bucket with multiple PartitionPages. When searching for
1542 // a bucket here, we need to check two conditions:
1543 // (1) The bucket is used in our current bucket distribution.
1544 // (2) The bucket is large enough that our requested size (see below) will be
1545 // non-zero.
1546 size_t real_size = 0;
1547 for (const auto& bucket : root->buckets) {
1548 if ((root->buckets + SizeToIndex(bucket.slot_size))->slot_size !=
1549 bucket.slot_size) {
1550 continue;
1551 }
1552 if (bucket.slot_size <= ExtraAllocSize(allocator)) {
1553 continue;
1554 }
1555 if (bucket.num_system_pages_per_slot_span >
1556 NumSystemPagesPerPartitionPage()) {
1557 real_size = bucket.slot_size;
1558 break;
1559 }
1560 }
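// Illustrative example of condition (1), with made-up numbers: under a
// coarser bucket distribution, a bucket whose slot_size is, say, 224 may be
// unused, and SizeToIndex(224) may resolve to the 256-byte bucket instead,
// i.e. (root->buckets + SizeToIndex(224))->slot_size == 256 != 224, so the
// loop above skips it.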
1561
1562 // Make sure that we've managed to find an appropriate bucket.
1563 ASSERT_GT(real_size, 0u);
1564
1565 const size_t requested_size = real_size - ExtraAllocSize(allocator);
1566 // Double check we don't end up with 0 or negative size.
1567 EXPECT_GT(requested_size, 0u);
1568 EXPECT_LE(requested_size, real_size);
1569 const auto* bucket = allocator.root()->buckets + SizeToIndex(real_size);
1570 EXPECT_EQ(bucket->slot_size, real_size);
1571 // Make sure the test is testing multiple partition pages case.
1572 EXPECT_GT(bucket->num_system_pages_per_slot_span,
1573 PartitionPageSize() / SystemPageSize());
1574 size_t num_slots =
1575 (bucket->num_system_pages_per_slot_span * SystemPageSize()) / real_size;
1576 std::vector<void*> ptrs;
1577 for (size_t i = 0; i < num_slots; ++i) {
1578 ptrs.push_back(allocator.root()->Alloc(requested_size, type_name));
1579 }
1580 for (void* ptr : ptrs) {
1581 uintptr_t address = UntagPtr(ptr);
1582 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1583 EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start),
1584 requested_size);
1585 for (size_t offset = 0; offset < requested_size; offset += 13) {
1586 EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
1587 slot_start);
1588 }
1589 allocator.root()->Free(ptr);
1590 }
1591 }
1592 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1593
1594 // Test the realloc() contract.
1595 TEST_P(PartitionAllocTest, Realloc) {
1596 // realloc(0, size) should be equivalent to malloc().
1597 void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
1598 memset(ptr, 'A', kTestAllocSize);
1599 auto* slot_span =
1600 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1601 // realloc(ptr, 0) should be equivalent to free().
1602 void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
1603 EXPECT_EQ(nullptr, ptr2);
1604 EXPECT_EQ(allocator.root()->ObjectToSlotStart(ptr),
1605 UntagPtr(slot_span->get_freelist_head()));
1606
1607 // Test that growing an allocation with realloc() copies everything from the
1608 // old allocation.
1609 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
1610 // Confirm size fills the entire slot.
1611 ASSERT_EQ(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
1612 ptr = allocator.root()->Alloc(size, type_name);
1613 memset(ptr, 'A', size);
1614 ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
1615 PA_EXPECT_PTR_NE(ptr, ptr2);
1616 char* char_ptr2 = static_cast<char*>(ptr2);
1617 EXPECT_EQ('A', char_ptr2[0]);
1618 EXPECT_EQ('A', char_ptr2[size - 1]);
1619 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1620 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
1621 #endif
1622
1623 // Test that shrinking an allocation with realloc() also copies everything
1624 // from the old allocation. Use |size - 1| to test what happens to the extra
1625 // space before the cookie.
1626 ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
1627 PA_EXPECT_PTR_NE(ptr2, ptr);
1628 char* char_ptr = static_cast<char*>(ptr);
1629 EXPECT_EQ('A', char_ptr[0]);
1630 EXPECT_EQ('A', char_ptr[size - 2]);
1631 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1632 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
1633 #endif
1634
1635 allocator.root()->Free(ptr);
1636
1637 // Single-slot slot spans...
1638 // Test that growing an allocation with realloc() copies everything from the
1639 // old allocation.
1640 size = MaxRegularSlotSpanSize() + 1;
1641 ASSERT_LE(2 * size, kMaxBucketed); // should be in single-slot span range
1642 // Confirm size doesn't fill the entire slot.
1643 ASSERT_LT(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
1644 ptr = allocator.root()->Alloc(size, type_name);
1645 memset(ptr, 'A', size);
1646 ptr2 = allocator.root()->Realloc(ptr, size * 2, type_name);
1647 PA_EXPECT_PTR_NE(ptr, ptr2);
1648 char_ptr2 = static_cast<char*>(ptr2);
1649 EXPECT_EQ('A', char_ptr2[0]);
1650 EXPECT_EQ('A', char_ptr2[size - 1]);
1651 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1652 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
1653 #endif
1654 allocator.root()->Free(ptr2);
1655
1656 // Test that shrinking an allocation with realloc() also copies everything
1657 // from the old allocation.
1658 size = 2 * (MaxRegularSlotSpanSize() + 1);
1659 ASSERT_GT(size / 2, MaxRegularSlotSpanSize()); // in single-slot span range
1660 ptr = allocator.root()->Alloc(size, type_name);
1661 memset(ptr, 'A', size);
1662 ptr2 = allocator.root()->Realloc(ptr2, size / 2, type_name);
1663 PA_EXPECT_PTR_NE(ptr, ptr2);
1664 char_ptr2 = static_cast<char*>(ptr2);
1665 EXPECT_EQ('A', char_ptr2[0]);
1666 EXPECT_EQ('A', char_ptr2[size / 2 - 1]);
1667 #if BUILDFLAG(PA_DCHECK_IS_ON)
1668 // For single-slot slot spans, the cookie is always placed immediately after
1669 // the allocation.
1670 EXPECT_EQ(kCookieValue[0], static_cast<unsigned char>(char_ptr2[size / 2]));
1671 #endif
1672 allocator.root()->Free(ptr2);
1673
1674 // Test that shrinking a direct mapped allocation happens in-place.
1675 // Pick a large size so that Realloc doesn't think it's worthwhile to
1676 // downsize even if one less super page is used (due to high granularity on
1677 // 64-bit systems).
1678 size = 10 * kSuperPageSize + SystemPageSize() - 42;
1679 ASSERT_GT(size - 32 * SystemPageSize(), kMaxBucketed);
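// Rough numbers behind "large enough" (assuming the common 2 MiB super pages
// and 4 KiB system pages): |size| is about 20 MiB, so shrinking by one system
// page, or even by 32 of them (~128 KiB), releases far less than a super page
// and is expected to be done in place.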
1680 ptr = allocator.root()->Alloc(size, type_name);
1681 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1682 size_t actual_capacity =
1683 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1684 ptr2 = allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
1685 uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1686 EXPECT_EQ(slot_start, slot_start2);
1687 EXPECT_EQ(actual_capacity - SystemPageSize(),
1688 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1689 void* ptr3 =
1690 allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(), type_name);
1691 uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1692 EXPECT_EQ(slot_start2, slot_start3);
1693 EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
1694 allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
1695
1696 // Test that a previously in-place shrunk direct mapped allocation can be
1697 // expanded again up to its original size.
1698 ptr = allocator.root()->Realloc(ptr3, size, type_name);
1699 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1700 EXPECT_EQ(slot_start3, slot_start);
1701 EXPECT_EQ(actual_capacity,
1702 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1703
1704 // Test that the allocation can be expanded in place up to its capacity.
1705 ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
1706 slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1707 EXPECT_EQ(slot_start, slot_start2);
1708 EXPECT_EQ(actual_capacity,
1709 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1710
1711 // Test that a direct mapped allocation is not performed in-place when the
1712 // new size is small enough.
1713 ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
1714 slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1715 EXPECT_NE(slot_start, slot_start3);
1716
1717 allocator.root()->Free(ptr3);
1718 }
1719
1720 TEST_P(PartitionAllocTest, ReallocDirectMapAligned) {
1721 size_t alignments[] = {
1722 PartitionPageSize(),
1723 2 * PartitionPageSize(),
1724 kMaxSupportedAlignment / 2,
1725 kMaxSupportedAlignment,
1726 };
1727
1728 for (size_t alignment : alignments) {
1729 // Test that shrinking a direct mapped allocation happens in-place.
1730 // Pick a large size so that Realloc doesn't think it's worthwhile to
1731 // downsize even if one less super page is used (due to high granularity on
1732 // 64-bit systems), even if the alignment padding is taken out.
1733 size_t size = 10 * kSuperPageSize + SystemPageSize() - 42;
1734 ASSERT_GT(size, kMaxBucketed);
1735 void* ptr =
1736 allocator.root()->AllocWithFlagsInternal(0, size, alignment, type_name);
1737 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1738 size_t actual_capacity =
1739 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1740 void* ptr2 =
1741 allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
1742 uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1743 EXPECT_EQ(slot_start, slot_start2);
1744 EXPECT_EQ(actual_capacity - SystemPageSize(),
1745 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1746 void* ptr3 = allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(),
1747 type_name);
1748 uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1749 EXPECT_EQ(slot_start2, slot_start3);
1750 EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
1751 allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
1752
1753 // Test that a previously in-place shrunk direct mapped allocation can be
1754 // expanded again up to its original size.
1755 ptr = allocator.root()->Realloc(ptr3, size, type_name);
1756 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1757 EXPECT_EQ(slot_start3, slot_start);
1758 EXPECT_EQ(actual_capacity,
1759 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1760
1761 // Test that the allocation can be expanded in place up to its capacity.
1762 ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
1763 slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1764 EXPECT_EQ(slot_start, slot_start2);
1765 EXPECT_EQ(actual_capacity,
1766 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1767
1768 // Test that a direct mapped allocation is not performed in-place when the
1769 // new size is small enough.
1770 ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
1771 slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1772 EXPECT_NE(slot_start2, slot_start3);
1773
1774 allocator.root()->Free(ptr3);
1775 }
1776 }
1777
1778 TEST_P(PartitionAllocTest, ReallocDirectMapAlignedRelocate) {
1779 // Pick a size such that the alignment will put it across the super page
1780 // boundary.
1781 size_t size = 2 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
1782 ASSERT_GT(size, kMaxBucketed);
1783 void* ptr = allocator.root()->AllocWithFlagsInternal(
1784 0, size, kMaxSupportedAlignment, type_name);
1785 // Reallocating with the same size will actually relocate, because without a
1786 // need for alignment we can downsize the reservation significantly.
1787 void* ptr2 = allocator.root()->Realloc(ptr, size, type_name);
1788 PA_EXPECT_PTR_NE(ptr, ptr2);
1789 allocator.root()->Free(ptr2);
1790
1791 // Again pick a size such that the alignment will put it across the super page
1792 // boundary, but this time make it so large that Realloc doesn't find it worth
1793 // shrinking.
1794 size = 10 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
1795 ASSERT_GT(size, kMaxBucketed);
1796 ptr = allocator.root()->AllocWithFlagsInternal(
1797 0, size, kMaxSupportedAlignment, type_name);
1798 ptr2 = allocator.root()->Realloc(ptr, size, type_name);
1799 EXPECT_EQ(ptr, ptr2);
1800 allocator.root()->Free(ptr2);
1801 }
1802
1803 // Tests the handing out of freelists for partial slot spans.
1804 TEST_P(PartitionAllocTest, PartialPageFreelists) {
1805 size_t big_size = SystemPageSize() - ExtraAllocSize(allocator);
1806 size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
1807 PartitionRoot<ThreadSafe>::Bucket* bucket =
1808 &allocator.root()->buckets[bucket_index];
1809 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1810
1811 void* ptr = allocator.root()->Alloc(big_size, type_name);
1812 EXPECT_TRUE(ptr);
1813
1814 auto* slot_span =
1815 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1816 size_t total_slots =
1817 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1818 (big_size + ExtraAllocSize(allocator));
1819 EXPECT_EQ(4u, total_slots);
1820 // The freelist should be empty: the slot size equals a system page, so only
1821 // one slot could be provisioned in the first system page, and it was handed
1822 // out by the allocation above.
1823 EXPECT_FALSE(slot_span->get_freelist_head());
1824 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1825 EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
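// Worked numbers for the assertions above: big_size + ExtraAllocSize(allocator)
// equals SystemPageSize(), so each slot occupies exactly one system page and,
// per the EXPECT_EQ(4u, total_slots) check, the slot span covers 4 of them.
// Only the first page is provisioned by the first allocation, hence 1
// allocated slot, 3 unprovisioned ones, and an empty freelist.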
1826
1827 void* ptr2 = allocator.root()->Alloc(big_size, type_name);
1828 EXPECT_TRUE(ptr2);
1829 EXPECT_FALSE(slot_span->get_freelist_head());
1830 EXPECT_EQ(2u, slot_span->num_allocated_slots);
1831 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
1832
1833 void* ptr3 = allocator.root()->Alloc(big_size, type_name);
1834 EXPECT_TRUE(ptr3);
1835 EXPECT_FALSE(slot_span->get_freelist_head());
1836 EXPECT_EQ(3u, slot_span->num_allocated_slots);
1837 EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
1838
1839 void* ptr4 = allocator.root()->Alloc(big_size, type_name);
1840 EXPECT_TRUE(ptr4);
1841 EXPECT_FALSE(slot_span->get_freelist_head());
1842 EXPECT_EQ(4u, slot_span->num_allocated_slots);
1843 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
1844
1845 void* ptr5 = allocator.root()->Alloc(big_size, type_name);
1846 EXPECT_TRUE(ptr5);
1847
1848 auto* slot_span2 =
1849 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr5));
1850 EXPECT_EQ(1u, slot_span2->num_allocated_slots);
1851
1852 // Churn things a little whilst there's a partial slot span freelist.
1853 allocator.root()->Free(ptr);
1854 ptr = allocator.root()->Alloc(big_size, type_name);
1855 void* ptr6 = allocator.root()->Alloc(big_size, type_name);
1856
1857 allocator.root()->Free(ptr);
1858 allocator.root()->Free(ptr2);
1859 allocator.root()->Free(ptr3);
1860 allocator.root()->Free(ptr4);
1861 allocator.root()->Free(ptr5);
1862 allocator.root()->Free(ptr6);
1863 EXPECT_TRUE(slot_span->in_empty_cache());
1864 EXPECT_TRUE(slot_span2->in_empty_cache());
1865 EXPECT_TRUE(slot_span2->get_freelist_head());
1866 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
1867
1868 // Size that's just above half a page.
1869 size_t non_dividing_size =
1870 SystemPageSize() / 2 + 1 - ExtraAllocSize(allocator);
1871 bucket_index = SizeToIndex(non_dividing_size + ExtraAllocSize(allocator));
1872 bucket = &allocator.root()->buckets[bucket_index];
1873 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1874
1875 ptr = allocator.root()->Alloc(non_dividing_size, type_name);
1876 EXPECT_TRUE(ptr);
1877
1878 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1879 total_slots =
1880 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1881 bucket->slot_size;
1882
1883 EXPECT_FALSE(slot_span->get_freelist_head());
1884 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1885 EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
1886
1887 ptr2 = allocator.root()->Alloc(non_dividing_size, type_name);
1888 EXPECT_TRUE(ptr2);
1889 EXPECT_TRUE(slot_span->get_freelist_head());
1890 EXPECT_EQ(2u, slot_span->num_allocated_slots);
1891 // 2 slots got provisioned: the first one fills the rest of the first (already
1892 // provisioned) page and exceeds it by just a tad, thus leading to provisioning
1893 // a new page, and the second one fully fits within that new page.
1894 EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
1895
1896 ptr3 = allocator.root()->Alloc(non_dividing_size, type_name);
1897 EXPECT_TRUE(ptr3);
1898 EXPECT_FALSE(slot_span->get_freelist_head());
1899 EXPECT_EQ(3u, slot_span->num_allocated_slots);
1900 EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
1901
1902 allocator.root()->Free(ptr);
1903 allocator.root()->Free(ptr2);
1904 allocator.root()->Free(ptr3);
1905 EXPECT_TRUE(slot_span->in_empty_cache());
1906 EXPECT_TRUE(slot_span2->get_freelist_head());
1907 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
1908
1909 // And test a couple of sizes that do not cross SystemPageSize() with a
1910 // single allocation.
1911 size_t medium_size = (SystemPageSize() / 2) - ExtraAllocSize(allocator);
1912 bucket_index = SizeToIndex(medium_size + ExtraAllocSize(allocator));
1913 bucket = &allocator.root()->buckets[bucket_index];
1914 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1915
1916 ptr = allocator.root()->Alloc(medium_size, type_name);
1917 EXPECT_TRUE(ptr);
1918 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1919 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1920 total_slots =
1921 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1922 (medium_size + ExtraAllocSize(allocator));
1923 size_t first_slot_span_slots =
1924 SystemPageSize() / (medium_size + ExtraAllocSize(allocator));
1925 EXPECT_EQ(2u, first_slot_span_slots);
1926 EXPECT_EQ(total_slots - first_slot_span_slots,
1927 slot_span->num_unprovisioned_slots);
1928
1929 allocator.root()->Free(ptr);
1930
1931 size_t small_size = (SystemPageSize() / 4) - ExtraAllocSize(allocator);
1932 bucket_index = SizeToIndex(small_size + ExtraAllocSize(allocator));
1933 bucket = &allocator.root()->buckets[bucket_index];
1934 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1935
1936 ptr = allocator.root()->Alloc(small_size, type_name);
1937 EXPECT_TRUE(ptr);
1938 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1939 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1940 total_slots =
1941 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1942 (small_size + ExtraAllocSize(allocator));
1943 first_slot_span_slots =
1944 SystemPageSize() / (small_size + ExtraAllocSize(allocator));
1945 EXPECT_EQ(total_slots - first_slot_span_slots,
1946 slot_span->num_unprovisioned_slots);
1947
1948 allocator.root()->Free(ptr);
1949 EXPECT_TRUE(slot_span->get_freelist_head());
1950 EXPECT_EQ(0u, slot_span->num_allocated_slots);
1951
1952 static_assert(kExtraAllocSizeWithRefCount < 64, "");
1953 size_t very_small_size = (ExtraAllocSize(allocator) <= 32)
1954 ? (32 - ExtraAllocSize(allocator))
1955 : (64 - ExtraAllocSize(allocator));
1956 size_t very_small_adjusted_size =
1957 allocator.root()->AdjustSize0IfNeeded(very_small_size);
1958 bucket_index =
1959 SizeToIndex(very_small_adjusted_size + ExtraAllocSize(allocator));
1960 bucket = &allocator.root()->buckets[bucket_index];
1961 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1962
1963 ptr = allocator.root()->Alloc(very_small_size, type_name);
1964 EXPECT_TRUE(ptr);
1965 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1966 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1967 size_t very_small_actual_size = allocator.root()->GetUsableSize(ptr);
1968 total_slots =
1969 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1970 (very_small_actual_size + ExtraAllocSize(allocator));
1971 first_slot_span_slots =
1972 SystemPageSize() / (very_small_actual_size + ExtraAllocSize(allocator));
1973 EXPECT_EQ(total_slots - first_slot_span_slots,
1974 slot_span->num_unprovisioned_slots);
1975
1976 allocator.root()->Free(ptr);
1977 EXPECT_TRUE(slot_span->get_freelist_head());
1978 EXPECT_EQ(0u, slot_span->num_allocated_slots);
1979
1980 // And try an allocation size (against the generic allocator) that is
1981 // larger than a system page.
1982 size_t page_and_a_half_size =
1983 (SystemPageSize() + (SystemPageSize() / 2)) - ExtraAllocSize(allocator);
1984 ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
1985 EXPECT_TRUE(ptr);
1986 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1987 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1988 // Only the first slot was provisioned, and that's the one that was just
1989 // allocated so the free list is empty.
1990 EXPECT_TRUE(!slot_span->get_freelist_head());
1991 total_slots =
1992 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1993 (page_and_a_half_size + ExtraAllocSize(allocator));
1994 EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
1995 ptr2 = allocator.root()->Alloc(page_and_a_half_size, type_name);
1996 EXPECT_TRUE(ptr);
1997 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1998 EXPECT_EQ(2u, slot_span->num_allocated_slots);
1999 // As above, only one slot was provisioned.
2000 EXPECT_TRUE(!slot_span->get_freelist_head());
2001 EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
2002 allocator.root()->Free(ptr);
2003 allocator.root()->Free(ptr2);
2004
2005 // And then make sure that exactly the page size only faults one page.
2006 size_t page_size = SystemPageSize() - ExtraAllocSize(allocator);
2007 ptr = allocator.root()->Alloc(page_size, type_name);
2008 EXPECT_TRUE(ptr);
2009 slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2010 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2011 EXPECT_TRUE(slot_span->get_freelist_head());
2012 total_slots =
2013 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2014 (page_size + ExtraAllocSize(allocator));
2015 EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
2016 allocator.root()->Free(ptr);
2017 }
2018
2019 // Test some of the fragmentation-resistant properties of the allocator.
2020 TEST_P(PartitionAllocTest, SlotSpanRefilling) {
2021 PartitionRoot<ThreadSafe>::Bucket* bucket =
2022 &allocator.root()->buckets[test_bucket_index_];
2023
2024 // Grab two full slot spans and a non-full slot span.
2025 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
2026 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
2027 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2028 EXPECT_TRUE(ptr);
2029 EXPECT_NE(slot_span1, bucket->active_slot_spans_head);
2030 EXPECT_NE(slot_span2, bucket->active_slot_spans_head);
2031 auto* slot_span =
2032 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2033 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2034
2035 // Work out a pointer into slot_span2 and free it; and then slot_span1 and
2036 // free it.
2037 void* ptr2 = allocator.root()->SlotStartToObject(
2038 SlotSpan::ToSlotSpanStart(slot_span1));
2039 allocator.root()->Free(ptr2);
2040 ptr2 = allocator.root()->SlotStartToObject(
2041 SlotSpan::ToSlotSpanStart(slot_span2));
2042 allocator.root()->Free(ptr2);
2043
2044 // If we perform two allocations from the same bucket now, we expect to
2045 // refill both the nearly full slot spans.
2046 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
2047 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
2048 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2049
2050 FreeFullSlotSpan(allocator.root(), slot_span2);
2051 FreeFullSlotSpan(allocator.root(), slot_span1);
2052 allocator.root()->Free(ptr);
2053 }
2054
2055 // Basic tests to ensure that allocations work for partial page buckets.
2056 TEST_P(PartitionAllocTest, PartialPages) {
2057 // Find a size that is backed by a partial partition page.
2058 size_t size = sizeof(void*);
2059 size_t bucket_index;
2060
2061 PartitionRoot<ThreadSafe>::Bucket* bucket = nullptr;
2062 constexpr size_t kMaxSize = 4000u;
2063 while (size < kMaxSize) {
2064 bucket_index = SizeToIndex(size + ExtraAllocSize(allocator));
2065 bucket = &allocator.root()->buckets[bucket_index];
2066 if (bucket->num_system_pages_per_slot_span %
2067 NumSystemPagesPerPartitionPage()) {
2068 break;
2069 }
2070 size += sizeof(void*);
2071 }
2072 EXPECT_LT(size, kMaxSize);
2073
2074 auto* slot_span1 = GetFullSlotSpan(size);
2075 auto* slot_span2 = GetFullSlotSpan(size);
2076 FreeFullSlotSpan(allocator.root(), slot_span2);
2077 FreeFullSlotSpan(allocator.root(), slot_span1);
2078 }
2079
2080 // Test correct handling if our mapping collides with another.
2081 TEST_P(PartitionAllocTest, MappingCollision) {
2082 size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
2083 // The -2 is because the first and last partition pages in a super page are
2084 // guard pages. We also discount the partition pages used for the free slot bitmap.
2085 size_t num_slot_span_needed =
2086 (NumPartitionPagesPerSuperPage() - 2 -
2087 partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
2088 num_pages_per_slot_span;
2089 size_t num_partition_pages_needed =
2090 num_slot_span_needed * num_pages_per_slot_span;
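// Worked example (assuming 2 MiB super pages and 16 KiB partition pages, i.e.
// 128 partition pages per super page): subtracting the 2 guard partition
// pages and any free-slot-bitmap pages leaves roughly 126 partition pages to
// split into slot spans of |num_pages_per_slot_span| partition pages each.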
2091
2092 auto first_super_page_pages =
2093 std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
2094 auto second_super_page_pages =
2095 std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
2096
2097 size_t i;
2098 for (i = 0; i < num_partition_pages_needed; ++i) {
2099 first_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
2100 }
2101
2102 uintptr_t slot_span_start =
2103 SlotSpan::ToSlotSpanStart(first_super_page_pages[0]);
2104 EXPECT_EQ(PartitionPageSize() +
2105 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
2106 slot_span_start & kSuperPageOffsetMask);
2107 uintptr_t super_page =
2108 slot_span_start - PartitionPageSize() -
2109 partition_alloc::internal::ReservedFreeSlotBitmapSize();
2110 // Map a single system page either side of the mapping for our allocations,
2111 // with the goal of tripping up alignment of the next mapping.
2112 uintptr_t map1 =
2113 AllocPages(super_page - PageAllocationGranularity(),
2114 PageAllocationGranularity(), PageAllocationGranularity(),
2115 PageAccessibilityConfiguration(
2116 PageAccessibilityConfiguration::kInaccessible),
2117 PageTag::kPartitionAlloc);
2118 EXPECT_TRUE(map1);
2119 uintptr_t map2 =
2120 AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
2121 PageAllocationGranularity(),
2122 PageAccessibilityConfiguration(
2123 PageAccessibilityConfiguration::kInaccessible),
2124 PageTag::kPartitionAlloc);
2125 EXPECT_TRUE(map2);
2126
2127 for (i = 0; i < num_partition_pages_needed; ++i) {
2128 second_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
2129 }
2130
2131 FreePages(map1, PageAllocationGranularity());
2132 FreePages(map2, PageAllocationGranularity());
2133
2134 super_page = SlotSpan::ToSlotSpanStart(second_super_page_pages[0]);
2135 EXPECT_EQ(PartitionPageSize() +
2136 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
2137 super_page & kSuperPageOffsetMask);
2138 super_page -= PartitionPageSize() +
2139 partition_alloc::internal::ReservedFreeSlotBitmapSize();
2140 // Map a single system page either side of the mapping for our allocations,
2141 // with the goal of tripping up alignment of the next mapping.
2142 map1 = AllocPages(super_page - PageAllocationGranularity(),
2143 PageAllocationGranularity(), PageAllocationGranularity(),
2144 PageAccessibilityConfiguration(
2145 PageAccessibilityConfiguration::kReadWriteTagged),
2146 PageTag::kPartitionAlloc);
2147 EXPECT_TRUE(map1);
2148 map2 = AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
2149 PageAllocationGranularity(),
2150 PageAccessibilityConfiguration(
2151 PageAccessibilityConfiguration::kReadWriteTagged),
2152 PageTag::kPartitionAlloc);
2153 EXPECT_TRUE(map2);
2154 EXPECT_TRUE(TrySetSystemPagesAccess(
2155 map1, PageAllocationGranularity(),
2156 PageAccessibilityConfiguration(
2157 PageAccessibilityConfiguration::kInaccessible)));
2158 EXPECT_TRUE(TrySetSystemPagesAccess(
2159 map2, PageAllocationGranularity(),
2160 PageAccessibilityConfiguration(
2161 PageAccessibilityConfiguration::kInaccessible)));
2162
2163 auto* slot_span_in_third_super_page = GetFullSlotSpan(kTestAllocSize);
2164 FreePages(map1, PageAllocationGranularity());
2165 FreePages(map2, PageAllocationGranularity());
2166
2167 EXPECT_EQ(0u, SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
2168 PartitionPageOffsetMask());
2169
2170 // And make sure we really did get a page in a new superpage.
2171 EXPECT_NE(
2172 SlotSpan::ToSlotSpanStart(first_super_page_pages[0]) & kSuperPageBaseMask,
2173 SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
2174 kSuperPageBaseMask);
2175 EXPECT_NE(SlotSpan::ToSlotSpanStart(second_super_page_pages[0]) &
2176 kSuperPageBaseMask,
2177 SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
2178 kSuperPageBaseMask);
2179
2180 FreeFullSlotSpan(allocator.root(), slot_span_in_third_super_page);
2181 for (i = 0; i < num_partition_pages_needed; ++i) {
2182 FreeFullSlotSpan(allocator.root(), first_super_page_pages[i]);
2183 FreeFullSlotSpan(allocator.root(), second_super_page_pages[i]);
2184 }
2185 }
2186
2187 // Tests that slot spans in the free slot span cache do get freed as
2188 // appropriate.
2189 TEST_P(PartitionAllocTest, FreeCache) {
2190 EXPECT_EQ(0U, allocator.root()->get_total_size_of_committed_pages());
2191
2192 size_t big_size = 1000 - ExtraAllocSize(allocator);
2193 size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
2194 PartitionBucket<internal::ThreadSafe>* bucket =
2195 &allocator.root()->buckets[bucket_index];
2196
2197 void* ptr = allocator.root()->Alloc(big_size, type_name);
2198 EXPECT_TRUE(ptr);
2199 auto* slot_span =
2200 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2201 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2202 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2203 // Lazy commit commits only needed pages.
2204 size_t expected_committed_size =
2205 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
2206 EXPECT_EQ(expected_committed_size,
2207 allocator.root()->get_total_size_of_committed_pages());
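// On a common configuration (4 KiB system pages, 16 KiB partition pages) this
// amounts to 4 KiB committed with lazy commit and 16 KiB without.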
2208 allocator.root()->Free(ptr);
2209 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2210 EXPECT_TRUE(slot_span->in_empty_cache());
2211 EXPECT_TRUE(slot_span->get_freelist_head());
2212
2213 CycleFreeCache(kTestAllocSize);
2214
2215 // Flushing the cache should have really freed the unused slot spans.
2216 EXPECT_FALSE(slot_span->get_freelist_head());
2217 EXPECT_FALSE(slot_span->in_empty_cache());
2218 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2219 size_t num_system_pages_per_slot_span = allocator.root()
2220 ->buckets[test_bucket_index_]
2221 .num_system_pages_per_slot_span;
2222 size_t expected_size =
2223 kUseLazyCommit ? SystemPageSize()
2224 : num_system_pages_per_slot_span * SystemPageSize();
2225 EXPECT_EQ(expected_size,
2226 allocator.root()->get_total_size_of_committed_pages());
2227
2228 // Check that an allocation works ok whilst in this state (a freed slot span
2229 // as the head of the active slot spans list).
2230 ptr = allocator.root()->Alloc(big_size, type_name);
2231 EXPECT_FALSE(bucket->empty_slot_spans_head);
2232 allocator.root()->Free(ptr);
2233
2234 // Also check that a slot span that is bouncing immediately between empty and
2235 // used does not get freed.
2236 for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
2237 ptr = allocator.root()->Alloc(big_size, type_name);
2238 EXPECT_TRUE(slot_span->get_freelist_head());
2239 allocator.root()->Free(ptr);
2240 EXPECT_TRUE(slot_span->get_freelist_head());
2241 }
2242 EXPECT_EQ(expected_committed_size,
2243 allocator.root()->get_total_size_of_committed_pages());
2244 }
2245
2246 // Tests for a bug we had with losing references to free slot spans.
2247 TEST_P(PartitionAllocTest, LostFreeSlotSpansBug) {
2248 size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
2249
2250 void* ptr = allocator.root()->Alloc(size, type_name);
2251 EXPECT_TRUE(ptr);
2252 void* ptr2 = allocator.root()->Alloc(size, type_name);
2253 EXPECT_TRUE(ptr2);
2254
2255 SlotSpanMetadata<internal::ThreadSafe>* slot_span =
2256 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
2257 allocator.root()->ObjectToSlotStart(ptr));
2258 SlotSpanMetadata<internal::ThreadSafe>* slot_span2 =
2259 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
2260 allocator.root()->ObjectToSlotStart(ptr2));
2261 PartitionBucket<internal::ThreadSafe>* bucket = slot_span->bucket;
2262
2263 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2264 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2265 EXPECT_EQ(1u, slot_span2->num_allocated_slots);
2266 EXPECT_TRUE(slot_span->is_full());
2267 EXPECT_TRUE(slot_span2->is_full());
2268 // The first span was kicked out from the active list, but the second one
2269 // wasn't.
2270 EXPECT_TRUE(slot_span->marked_full);
2271 EXPECT_FALSE(slot_span2->marked_full);
2272
2273 allocator.root()->Free(ptr);
2274 allocator.root()->Free(ptr2);
2275
2276 EXPECT_TRUE(bucket->empty_slot_spans_head);
2277 EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
2278 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2279 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
2280 EXPECT_FALSE(slot_span->is_full());
2281 EXPECT_FALSE(slot_span2->is_full());
2282 EXPECT_FALSE(slot_span->marked_full);
2283 EXPECT_FALSE(slot_span2->marked_full);
2284 EXPECT_TRUE(slot_span->get_freelist_head());
2285 EXPECT_TRUE(slot_span2->get_freelist_head());
2286
2287 CycleFreeCache(kTestAllocSize);
2288
2289 EXPECT_FALSE(slot_span->get_freelist_head());
2290 EXPECT_FALSE(slot_span2->get_freelist_head());
2291
2292 EXPECT_TRUE(bucket->empty_slot_spans_head);
2293 EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
2294 EXPECT_EQ(SlotSpanMetadata<internal::ThreadSafe>::get_sentinel_slot_span(),
2295 bucket->active_slot_spans_head);
2296
2297 // At this moment, we have two decommitted slot spans, on the empty list.
2298 ptr = allocator.root()->Alloc(size, type_name);
2299 EXPECT_TRUE(ptr);
2300 allocator.root()->Free(ptr);
2301
2302 EXPECT_EQ(SlotSpanMetadata<internal::ThreadSafe>::get_sentinel_slot_span(),
2303 bucket->active_slot_spans_head);
2304 EXPECT_TRUE(bucket->empty_slot_spans_head);
2305 EXPECT_TRUE(bucket->decommitted_slot_spans_head);
2306
2307 CycleFreeCache(kTestAllocSize);
2308
2309 // We're now set up to trigger a historical bug by scanning over the active
2310 // slot spans list. The current code gets into a different state, but we'll
2311 // keep the test as an interesting corner case.
2312 ptr = allocator.root()->Alloc(size, type_name);
2313 EXPECT_TRUE(ptr);
2314 allocator.root()->Free(ptr);
2315
2316 EXPECT_TRUE(bucket->is_valid());
2317 EXPECT_TRUE(bucket->empty_slot_spans_head);
2318 EXPECT_TRUE(bucket->decommitted_slot_spans_head);
2319 }
2320
2321 #if defined(PA_HAS_DEATH_TESTS)
2322
2323 // Unit tests that check that an allocation failing in "return null" mode
2324 // doesn't crash when repeated, and still returns null. The tests need to
2325 // stress memory subsystem limits to do so, hence they try to allocate
2326 // 6 GB of memory, each with a different per-allocation block size.
2327 //
2328 // On 64-bit systems we need to restrict the address space to force allocation
2329 // failure, so these tests run only on POSIX systems that provide setrlimit(),
2330 // and use it to limit address space to 6GB.
2331 //
2332 // Disable these tests on Android because, due to the allocation-heavy behavior,
2333 // they tend to get OOM-killed rather than pass.
2334 //
2335 // Disable these tests on Windows, since they run slower there, so they tend
2336 // to time out and cause flakiness.
2337 #if !BUILDFLAG(IS_WIN) && \
2338 (!defined(ARCH_CPU_64_BITS) || \
2339 (BUILDFLAG(IS_POSIX) && \
2340 !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)))) || \
2341 BUILDFLAG(IS_FUCHSIA)
2342 #define MAYBE_RepeatedAllocReturnNullDirect RepeatedAllocReturnNullDirect
2343 #define MAYBE_RepeatedReallocReturnNullDirect RepeatedReallocReturnNullDirect
2344 #define MAYBE_RepeatedTryReallocReturnNullDirect \
2345 RepeatedTryReallocReturnNullDirect
2346 #else
2347 #define MAYBE_RepeatedAllocReturnNullDirect \
2348 DISABLED_RepeatedAllocReturnNullDirect
2349 #define MAYBE_RepeatedReallocReturnNullDirect \
2350 DISABLED_RepeatedReallocReturnNullDirect
2351 #define MAYBE_RepeatedTryReallocReturnNullDirect \
2352 DISABLED_RepeatedTryReallocReturnNullDirect
2353 #endif
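// For reference, a sketch of how such an address-space cap is typically
// applied on POSIX. The real DoReturnNullTest helper is defined earlier in
// this file; the helper name and exact limit below are illustrative
// assumptions, not the test's actual code:
//
//   bool LimitAddressSpace(uint64_t bytes) {  // e.g. 6ull << 30 for 6 GB
//     struct rlimit limit;
//     limit.rlim_cur = bytes;
//     limit.rlim_max = bytes;
//     return setrlimit(RLIMIT_AS, &limit) == 0;
//   }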
2354
2355 // The following tests wrap a called function in an EXPECT_DEATH statement
2356 // to perform their test, because they are non-hermetic. Specifically they are
2357 // going to attempt to exhaust the allocatable memory, which leaves the
2358 // allocator in a bad global state.
2359 // Performing them as death tests causes them to be forked into their own
2360 // process, so they won't pollute other tests.
2361 //
2362 // These tests are *very* slow when BUILDFLAG(PA_DCHECK_IS_ON), because they
2363 // memset() many GiB of data (see crbug.com/1168168).
2364 // TODO(lizeb): make these tests faster.
2365 TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedAllocReturnNullDirect) {
2366 // A direct-mapped allocation size.
2367 size_t direct_map_size = 32 * 1024 * 1024;
2368 ASSERT_GT(direct_map_size, kMaxBucketed);
2369 EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionAllocWithFlags),
2370 "Passed DoReturnNullTest");
2371 }
2372
2373 // Repeating above test with Realloc
2374 TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedReallocReturnNullDirect) {
2375 size_t direct_map_size = 32 * 1024 * 1024;
2376 ASSERT_GT(direct_map_size, kMaxBucketed);
2377 EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionReallocWithFlags),
2378 "Passed DoReturnNullTest");
2379 }
2380
2381 // Repeating above test with TryRealloc
2382 TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedTryReallocReturnNullDirect) {
2383 size_t direct_map_size = 32 * 1024 * 1024;
2384 ASSERT_GT(direct_map_size, kMaxBucketed);
2385 EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionRootTryRealloc),
2386 "Passed DoReturnNullTest");
2387 }
2388
2389 // TODO(crbug.com/1348221) re-enable the tests below, once the allocator
2390 // actually returns nullptr for non direct-mapped allocations.
2391 // When doing so, they will need to be made MAYBE_ like those above.
2392 //
2393 // Tests "return null" with a 512 kB block size.
2394 TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedAllocReturnNull) {
2395 // A single-slot but non-direct-mapped allocation size.
2396 size_t single_slot_size = 512 * 1024;
2397 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2398 ASSERT_LE(single_slot_size, kMaxBucketed);
2399 EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionAllocWithFlags),
2400 "Passed DoReturnNullTest");
2401 }
2402
2403 // Repeating above test with Realloc.
2404 TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedReallocReturnNull) {
2405 size_t single_slot_size = 512 * 1024;
2406 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2407 ASSERT_LE(single_slot_size, kMaxBucketed);
2408 EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionReallocWithFlags),
2409 "Passed DoReturnNullTest");
2410 }
2411
2412 // Repeating above test with TryRealloc.
2413 TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedTryReallocReturnNull) {
2414 size_t single_slot_size = 512 * 1024;
2415 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2416 ASSERT_LE(single_slot_size, kMaxBucketed);
2417 EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionRootTryRealloc),
2418 "Passed DoReturnNullTest");
2419 }
2420
2421 #if PA_CONFIG(HAS_MEMORY_TAGGING)
2422 // Check that Arm's memory tagging extension (MTE) is correctly protecting
2423 // freed pointers. Writes to a free pointer should result in a crash.
2424 TEST_P(PartitionAllocDeathTest, MTEProtectsFreedPtr) {
2425 base::CPU cpu;
2426 if (!cpu.has_mte()) {
2427 // This test won't pass on systems without MTE.
2428 GTEST_SKIP();
2429 }
2430
2431 constexpr uint64_t kCookie = 0x1234567890ABCDEF;
2432 constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
2433
2434 // Make an arbitrary-sized small allocation.
2435 size_t alloc_size = 64 - ExtraAllocSize(allocator);
2436 uint64_t* ptr =
2437 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
2438 EXPECT_TRUE(ptr);
2439
2440 // Check that the allocation's writable.
2441 *ptr = kCookie;
2442
2443 // Invalidate ptr by freeing it.
2444 allocator.root()->Free(ptr);
2445
2446 // Writing to ptr after free() should crash
2447 EXPECT_EXIT(
2448 {
2449 // Should be in synchronous MTE mode for running this test.
2450 *ptr = kQuarantined;
2451 },
2452 testing::KilledBySignal(SIGSEGV), "");
2453 }
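// A sketch of how the "synchronous MTE mode" requirement noted above could be
// checked on Linux (illustrative, not part of the test; PR_* constants come
// from <linux/prctl.h> on kernels with MTE support):
//
//   int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
//   bool sync_mte = ctrl >= 0 && (ctrl & PR_MTE_TCF_SYNC);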
2454 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
2455
2456 // Make sure that malloc(-1) dies.
2457 // In the past, we had an integer overflow that would alias malloc(-1) to
2458 // malloc(0), which is not good.
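// Worked example of that overflow: with, say, 8 bytes of per-slot extras,
// static_cast<size_t>(-1) + 8 wraps around to 7, so an unchecked size
// adjustment would turn the largest possible request into a tiny allocation.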
2459 TEST_P(PartitionAllocDeathTest, LargeAllocs) {
2460 // Largest alloc.
2461 EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
2462 // And the smallest allocation we expect to die.
2463 // TODO(bartekn): Separate into its own test, as it wouldn't run (same below).
2464 EXPECT_DEATH(allocator.root()->Alloc(MaxDirectMapped() + 1, type_name), "");
2465 }
2466
2467 // These tests don't work deterministically when BRP is enabled on certain
2468 // architectures. On Free(), BRP's ref-count gets overwritten by an encoded
2469 // freelist pointer. On little-endian 64-bit architectures, this happens to be
2470 // always an even number, which triggers BRP's own CHECK (sic!). On other
2471 // architectures, it's likely to be an odd number >1, which will fool BRP into
2472 // thinking the memory isn't freed and still referenced, thus making it
2473 // quarantine it and return early, before PA_CHECK(slot_start != freelist_head)
2474 // is reached.
2475 // TODO(bartekn): Enable in the BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) case.
2476 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
2477 (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
2478
2479 // Check that our immediate double-free detection works.
2480 TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree) {
2481 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2482 EXPECT_TRUE(ptr);
2483 allocator.root()->Free(ptr);
2484 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2485 }
2486
2487 // As above, but when this isn't the only slot in the span.
2488 TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree2ndSlot) {
2489 void* ptr0 = allocator.root()->Alloc(kTestAllocSize, type_name);
2490 EXPECT_TRUE(ptr0);
2491 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2492 EXPECT_TRUE(ptr);
2493 allocator.root()->Free(ptr);
2494 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2495 allocator.root()->Free(ptr0);
2496 }
2497
2498 // Check that our double-free detection based on |num_allocated_slots| not going
2499 // below 0 works.
2500 //
2501 // Unlike in the ImmediateDoubleFree test, we can't have a 2ndSlot version, as
2502 // this protection wouldn't work when there is another slot present in the
2503 // span: that slot would keep |num_allocated_slots| from going below 0.
2504 TEST_P(PartitionAllocDeathTest, NumAllocatedSlotsDoubleFree) {
2505 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2506 EXPECT_TRUE(ptr);
2507 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
2508 EXPECT_TRUE(ptr2);
2509 allocator.root()->Free(ptr);
2510 allocator.root()->Free(ptr2);
2511 // This is not an immediate double-free so our immediate detection won't
2512 // fire. However, it does take |num_allocated_slots| to -1, which is illegal
2513 // and should be trapped.
2514 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2515 }
2516
2517 #endif // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
2518 // (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
2519
2520 // Check that guard pages are present where expected.
2521 TEST_P(PartitionAllocDeathTest, DirectMapGuardPages) {
2522 const size_t kSizes[] = {
2523 kMaxBucketed + ExtraAllocSize(allocator) + 1,
2524 kMaxBucketed + SystemPageSize(), kMaxBucketed + PartitionPageSize(),
2525 partition_alloc::internal::base::bits::AlignUp(
2526 kMaxBucketed + kSuperPageSize, kSuperPageSize) -
2527 PartitionRoot<ThreadSafe>::GetDirectMapMetadataAndGuardPagesSize()};
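// Rough layout assumed by the probes below (simplified; the direct map
// metadata definitions are authoritative): the object is preceded by
// metadata/guard pages and followed, past its committed page-aligned end, by
// a trailing guard region, so both
//   *(char_ptr - 1)
//   *(char_ptr + AlignUp(size, SystemPageSize()))
// must fault.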
2528 for (size_t size : kSizes) {
2529 ASSERT_GT(size, kMaxBucketed);
2530 size -= ExtraAllocSize(allocator);
2531 EXPECT_GT(size, kMaxBucketed)
2532 << "allocation not large enough for direct allocation";
2533 void* ptr = allocator.root()->Alloc(size, type_name);
2534
2535 EXPECT_TRUE(ptr);
2536 char* char_ptr = static_cast<char*>(ptr) - kPointerOffset;
2537
2538 EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
2539 EXPECT_DEATH(*(char_ptr + partition_alloc::internal::base::bits::AlignUp(
2540 size, SystemPageSize())) = 'A',
2541 "");
2542
2543 allocator.root()->Free(ptr);
2544 }
2545 }
2546
2547 // These tests rely on precise layout. They handle cookie, not ref-count.
2548 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
2549 PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
2550
2551 TEST_P(PartitionAllocDeathTest, UseAfterFreeDetection) {
2552 base::CPU cpu;
2553 void* data = allocator.root()->Alloc(100, "");
2554 allocator.root()->Free(data);
2555
2556 // Use after free: this write doesn't crash here, but the next allocation
2557 // should crash, since we corrupted the freelist.
2558 memset(data, 0x42, 100);
2559 EXPECT_DEATH(allocator.root()->Alloc(100, ""), "");
2560 }
2561
2562 TEST_P(PartitionAllocDeathTest, FreelistCorruption) {
2563 base::CPU cpu;
2564 const size_t alloc_size = 2 * sizeof(void*);
2565 void** fake_freelist_entry =
2566 static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
2567 fake_freelist_entry[0] = nullptr;
2568 fake_freelist_entry[1] = nullptr;
2569
2570 void** uaf_data =
2571 static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
2572 allocator.root()->Free(uaf_data);
2573 // Try to confuse the allocator. This is still easy to circumvent willingly;
2574 // one would "just" need to set uaf_data[1] to ~uaf_data[0].
2575 void* previous_uaf_data = uaf_data[0];
2576 uaf_data[0] = fake_freelist_entry;
2577 EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
2578
2579 // Restore the freelist entry value, otherwise freelist corruption is detected
2580 // in TearDown(), crashing this process.
2581 uaf_data[0] = previous_uaf_data;
2582
2583 allocator.root()->Free(fake_freelist_entry);
2584 }
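// For context, a simplified model of the freelist shadow check this test
// trips (field names are illustrative, not the real PartitionFreelistEntry
// layout):
//
//   struct ShadowedEntry {
//     uintptr_t encoded_next;
//     uintptr_t shadow;  // maintained as ~encoded_next
//   };
//   // On freelist pop, corruption is flagged when
//   // entry.shadow != ~entry.encoded_next, which is what overwriting
//   // uaf_data[0] without also fixing uaf_data[1] triggers.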
2585
2586 // With BUILDFLAG(PA_DCHECK_IS_ON), cookie already handles off-by-one detection.
2587 #if !BUILDFLAG(PA_DCHECK_IS_ON)
2588 TEST_P(PartitionAllocDeathTest, OffByOneDetection) {
2589 base::CPU cpu;
2590 const size_t alloc_size = 2 * sizeof(void*);
2591 char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size, ""));
2592 if (cpu.has_mte()) {
2593 EXPECT_DEATH(array[alloc_size] = 'A', "");
2594 } else {
2595 char previous_value = array[alloc_size];
2596 // volatile is required to prevent the compiler from getting too clever and
2597 // eliding the out-of-bounds write. The root cause is that the PA_MALLOC_FN
2598 // annotation tells the compiler (among other things) that the returned
2599 // value cannot alias anything.
2600 *const_cast<volatile char*>(&array[alloc_size]) = 'A';
2601 // Crash at the next allocation. This assumes that we are touching a new,
2602 // non-randomized slot span, where the next slot to be handed over to the
2603 // application directly follows the current one.
2604 EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
2605
2606 // Restore integrity, otherwise the process will crash in TearDown().
2607 array[alloc_size] = previous_value;
2608 }
2609 }
2610
2611 TEST_P(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
2612 base::CPU cpu;
2613 const size_t alloc_size = 2 * sizeof(void*);
2614 void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
2615 char valid;
2616 if (cpu.has_mte()) {
2617 EXPECT_DEATH(array[2] = &valid, "");
2618 } else {
2619 void* previous_value = array[2];
2620 // As above, needs volatile to convince the compiler to perform the write.
2621 *const_cast<void* volatile*>(&array[2]) = &valid;
2622 // Crash at the next allocation. This assumes that we are touching a new,
2623 // non-randomized slot span, where the next slot to be handed over to the
2624 // application directly follows the current one.
2625 EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
2626 array[2] = previous_value;
2627 }
2628 }
2629 #endif // !BUILDFLAG(PA_DCHECK_IS_ON)
2630
2631 #endif // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
2632 // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
2633
2634 #endif // defined(PA_HAS_DEATH_TESTS)
2635
2636 // Tests that |PartitionRoot::DumpStats| runs without crashing and returns
2637 // non-zero values when memory is allocated.
2638 TEST_P(PartitionAllocTest, DumpMemoryStats) {
2639 {
2640 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2641 MockPartitionStatsDumper mock_stats_dumper;
2642 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2643 &mock_stats_dumper);
2644 EXPECT_TRUE(mock_stats_dumper.IsMemoryAllocationRecorded());
2645 allocator.root()->Free(ptr);
2646 }
2647
2648 // This series of tests checks the active -> empty -> decommitted states.
2649 {
2650 {
2651 void* ptr =
2652 allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name);
2653 MockPartitionStatsDumper dumper;
2654 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2655 &dumper);
2656 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2657
2658 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2659 EXPECT_TRUE(stats);
2660 EXPECT_TRUE(stats->is_valid);
2661 EXPECT_EQ(2048u, stats->bucket_slot_size);
2662 EXPECT_EQ(2048u, stats->active_bytes);
2663 EXPECT_EQ(1u, stats->active_count);
2664 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2665 EXPECT_EQ(0u, stats->decommittable_bytes);
2666 EXPECT_EQ(0u, stats->discardable_bytes);
2667 EXPECT_EQ(0u, stats->num_full_slot_spans);
2668 EXPECT_EQ(1u, stats->num_active_slot_spans);
2669 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2670 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2671 allocator.root()->Free(ptr);
2672 }
2673
2674 {
2675 MockPartitionStatsDumper dumper;
2676 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2677 &dumper);
2678 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2679
2680 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2681 EXPECT_TRUE(stats);
2682 EXPECT_TRUE(stats->is_valid);
2683 EXPECT_EQ(2048u, stats->bucket_slot_size);
2684 EXPECT_EQ(0u, stats->active_bytes);
2685 EXPECT_EQ(0u, stats->active_count);
2686 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2687 EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
2688 EXPECT_EQ(0u, stats->discardable_bytes);
2689 EXPECT_EQ(0u, stats->num_full_slot_spans);
2690 EXPECT_EQ(0u, stats->num_active_slot_spans);
2691 EXPECT_EQ(1u, stats->num_empty_slot_spans);
2692 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2693 }
2694
2695 // TODO(crbug.com/722911): Commenting this out causes this test to fail when
2696 // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
2697 // when run with the others (--gtest_filter=PartitionAllocTest.*).
2698 CycleFreeCache(kTestAllocSize);
2699
2700 {
2701 MockPartitionStatsDumper dumper;
2702 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2703 &dumper);
2704 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2705
2706 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2707 EXPECT_TRUE(stats);
2708 EXPECT_TRUE(stats->is_valid);
2709 EXPECT_EQ(2048u, stats->bucket_slot_size);
2710 EXPECT_EQ(0u, stats->active_bytes);
2711 EXPECT_EQ(0u, stats->active_count);
2712 EXPECT_EQ(0u, stats->resident_bytes);
2713 EXPECT_EQ(0u, stats->decommittable_bytes);
2714 EXPECT_EQ(0u, stats->discardable_bytes);
2715 EXPECT_EQ(0u, stats->num_full_slot_spans);
2716 EXPECT_EQ(0u, stats->num_active_slot_spans);
2717 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2718 EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
2719 }
2720 }
2721
2722 // This test checks for correct empty slot span list accounting.
2723 {
2724 size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
2725 void* ptr1 = allocator.root()->Alloc(size, type_name);
2726 void* ptr2 = allocator.root()->Alloc(size, type_name);
2727 allocator.root()->Free(ptr1);
2728 allocator.root()->Free(ptr2);
2729
2730 CycleFreeCache(kTestAllocSize);
2731
2732 ptr1 = allocator.root()->Alloc(size, type_name);
2733
2734 {
2735 MockPartitionStatsDumper dumper;
2736 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2737 &dumper);
2738 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2739
2740 const PartitionBucketMemoryStats* stats =
2741 dumper.GetBucketStats(PartitionPageSize());
2742 EXPECT_TRUE(stats);
2743 EXPECT_TRUE(stats->is_valid);
2744 EXPECT_EQ(PartitionPageSize(), stats->bucket_slot_size);
2745 EXPECT_EQ(PartitionPageSize(), stats->active_bytes);
2746 EXPECT_EQ(1u, stats->active_count);
2747 EXPECT_EQ(PartitionPageSize(), stats->resident_bytes);
2748 EXPECT_EQ(0u, stats->decommittable_bytes);
2749 EXPECT_EQ(0u, stats->discardable_bytes);
2750 EXPECT_EQ(1u, stats->num_full_slot_spans);
2751 EXPECT_EQ(0u, stats->num_active_slot_spans);
2752 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2753 EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
2754 }
2755 allocator.root()->Free(ptr1);
2756 }
2757
2758 // This test checks for correct direct mapped accounting.
2759 {
2760 size_t size_smaller = kMaxBucketed + 1;
2761 size_t size_bigger = (kMaxBucketed * 2) + 1;
2762 size_t real_size_smaller =
2763 (size_smaller + SystemPageOffsetMask()) & SystemPageBaseMask();
2764 size_t real_size_bigger =
2765 (size_bigger + SystemPageOffsetMask()) & SystemPageBaseMask();
2766 void* ptr = allocator.root()->Alloc(size_smaller, type_name);
2767 void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
2768
2769 {
2770 MockPartitionStatsDumper dumper;
2771 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2772 &dumper);
2773 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2774
2775 const PartitionBucketMemoryStats* stats =
2776 dumper.GetBucketStats(real_size_smaller);
2777 EXPECT_TRUE(stats);
2778 EXPECT_TRUE(stats->is_valid);
2779 EXPECT_TRUE(stats->is_direct_map);
2780 EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
2781 EXPECT_EQ(real_size_smaller, stats->active_bytes);
2782 EXPECT_EQ(1u, stats->active_count);
2783 EXPECT_EQ(real_size_smaller, stats->resident_bytes);
2784 EXPECT_EQ(0u, stats->decommittable_bytes);
2785 EXPECT_EQ(0u, stats->discardable_bytes);
2786 EXPECT_EQ(1u, stats->num_full_slot_spans);
2787 EXPECT_EQ(0u, stats->num_active_slot_spans);
2788 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2789 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2790
2791 stats = dumper.GetBucketStats(real_size_bigger);
2792 EXPECT_TRUE(stats);
2793 EXPECT_TRUE(stats->is_valid);
2794 EXPECT_TRUE(stats->is_direct_map);
2795 EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
2796 EXPECT_EQ(real_size_bigger, stats->active_bytes);
2797 EXPECT_EQ(1u, stats->active_count);
2798 EXPECT_EQ(real_size_bigger, stats->resident_bytes);
2799 EXPECT_EQ(0u, stats->decommittable_bytes);
2800 EXPECT_EQ(0u, stats->discardable_bytes);
2801 EXPECT_EQ(1u, stats->num_full_slot_spans);
2802 EXPECT_EQ(0u, stats->num_active_slot_spans);
2803 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2804 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2805 }
2806
2807 allocator.root()->Free(ptr2);
2808 allocator.root()->Free(ptr);
2809
2810 // Whilst we're here, allocate again and free with different ordering to
2811 // give a workout to our linked list code.
2812 ptr = allocator.root()->Alloc(size_smaller, type_name);
2813 ptr2 = allocator.root()->Alloc(size_bigger, type_name);
2814 allocator.root()->Free(ptr);
2815 allocator.root()->Free(ptr2);
2816 }
2817
2818 // This test checks large-but-not-quite-direct allocations.
2819 {
2820 const size_t requested_size = 16 * SystemPageSize();
2821 void* ptr = allocator.root()->Alloc(requested_size + 1, type_name);
2822
2823 {
2824 MockPartitionStatsDumper dumper;
2825 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2826 &dumper);
2827 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2828
2829 size_t slot_size = SizeToBucketSize(requested_size + 1);
2830 const PartitionBucketMemoryStats* stats =
2831 dumper.GetBucketStats(slot_size);
2832 ASSERT_TRUE(stats);
2833 EXPECT_TRUE(stats->is_valid);
2834 EXPECT_FALSE(stats->is_direct_map);
2835 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2836 EXPECT_EQ(requested_size + 1 + ExtraAllocSize(allocator),
2837 stats->active_bytes);
2838 EXPECT_EQ(1u, stats->active_count);
2839 EXPECT_EQ(slot_size, stats->resident_bytes);
2840 EXPECT_EQ(0u, stats->decommittable_bytes);
2841 EXPECT_EQ((slot_size - (requested_size + 1)) / SystemPageSize() *
2842 SystemPageSize(),
2843 stats->discardable_bytes);
2844 EXPECT_EQ(1u, stats->num_full_slot_spans);
2845 EXPECT_EQ(0u, stats->num_active_slot_spans);
2846 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2847 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2848 }
2849
2850 allocator.root()->Free(ptr);
2851
2852 {
2853 MockPartitionStatsDumper dumper;
2854 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2855 &dumper);
2856 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2857
2858 size_t slot_size = SizeToBucketSize(requested_size + 1);
2859 const PartitionBucketMemoryStats* stats =
2860 dumper.GetBucketStats(slot_size);
2861 EXPECT_TRUE(stats);
2862 EXPECT_TRUE(stats->is_valid);
2863 EXPECT_FALSE(stats->is_direct_map);
2864 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2865 EXPECT_EQ(0u, stats->active_bytes);
2866 EXPECT_EQ(0u, stats->active_count);
2867 EXPECT_EQ(slot_size, stats->resident_bytes);
2868 EXPECT_EQ(slot_size, stats->decommittable_bytes);
2869 EXPECT_EQ(0u, stats->num_full_slot_spans);
2870 EXPECT_EQ(0u, stats->num_active_slot_spans);
2871 EXPECT_EQ(1u, stats->num_empty_slot_spans);
2872 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2873 }
2874
2875 void* ptr2 = allocator.root()->Alloc(requested_size + SystemPageSize() + 1,
2876 type_name);
2877 EXPECT_EQ(ptr, ptr2);
2878
2879 {
2880 MockPartitionStatsDumper dumper;
2881 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2882 &dumper);
2883 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2884
2885 size_t slot_size =
2886 SizeToBucketSize(requested_size + SystemPageSize() + 1);
2887 const PartitionBucketMemoryStats* stats =
2888 dumper.GetBucketStats(slot_size);
2889 EXPECT_TRUE(stats);
2890 EXPECT_TRUE(stats->is_valid);
2891 EXPECT_FALSE(stats->is_direct_map);
2892 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2893 EXPECT_EQ(
2894 requested_size + SystemPageSize() + 1 + ExtraAllocSize(allocator),
2895 stats->active_bytes);
2896 EXPECT_EQ(1u, stats->active_count);
2897 EXPECT_EQ(slot_size, stats->resident_bytes);
2898 EXPECT_EQ(0u, stats->decommittable_bytes);
2899 EXPECT_EQ((slot_size - (requested_size + SystemPageSize() + 1)) /
2900 SystemPageSize() * SystemPageSize(),
2901 stats->discardable_bytes);
2902 EXPECT_EQ(1u, stats->num_full_slot_spans);
2903 EXPECT_EQ(0u, stats->num_active_slot_spans);
2904 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2905 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2906 }
2907
2908 allocator.root()->Free(ptr2);
2909 }
2910 }
2911
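// Illustrative sketch (not used by the tests): the direct-map expectations in
// DumpMemoryStats above round the requested size up to the system page size.
// The mask arithmetic used there is equivalent to this AlignUp call; the
// helper name is hypothetical and only restates the computation.
namespace {
[[maybe_unused]] size_t RoundUpToSystemPageForIllustration(size_t size) {
  // Same result as (size + SystemPageOffsetMask()) & SystemPageBaseMask().
  return partition_alloc::internal::base::bits::AlignUp(size,
                                                         SystemPageSize());
}
}  // namespace
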
2912 // Tests the API to purge freeable memory.
TEST_P(PartitionAllocTest, Purge) {
2914 char* ptr = static_cast<char*>(
2915 allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name));
2916 allocator.root()->Free(ptr);
2917 {
2918 MockPartitionStatsDumper dumper;
2919 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2920 &dumper);
2921 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2922
2923 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2924 EXPECT_TRUE(stats);
2925 EXPECT_TRUE(stats->is_valid);
2926 EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
2927 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2928 }
2929 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
2930 {
2931 MockPartitionStatsDumper dumper;
2932 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2933 &dumper);
2934 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2935
2936 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2937 EXPECT_TRUE(stats);
2938 EXPECT_TRUE(stats->is_valid);
2939 EXPECT_EQ(0u, stats->decommittable_bytes);
2940 EXPECT_EQ(0u, stats->resident_bytes);
2941 }
2942 // Calling purge again here is a good way of testing we didn't mess up the
2943 // state of the free cache ring.
2944 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
2945
2946 // A single-slot but non-direct-mapped allocation size.
2947 size_t single_slot_size = 512 * 1024;
2948 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2949 ASSERT_LE(single_slot_size, kMaxBucketed);
2950 char* big_ptr =
2951 static_cast<char*>(allocator.root()->Alloc(single_slot_size, type_name));
2952 allocator.root()->Free(big_ptr);
2953 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
2954
2955 CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
2956 CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
2957 }
2958
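// Illustrative sketch of how a client of the purge API exercised above might
// use it: decommit empty slot spans first, then discard unused system pages.
// The helper name is hypothetical and the function is not used by the tests.
namespace {
[[maybe_unused]] void PurgeEverythingPurgeable(
    PartitionRoot<ThreadSafe>* root) {
  root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
  root->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
}
}  // namespace
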
2959 // Tests that we prefer to allocate into a non-empty partition page over an
2960 // empty one. This is an important aspect of minimizing memory usage for some
2961 // allocation sizes, particularly larger ones.
TEST_P(PartitionAllocTest, PreferActiveOverEmpty) {
2963 size_t size = (SystemPageSize() * 2) - ExtraAllocSize(allocator);
2964 // Allocate 3 full slot spans worth of 8192-byte allocations.
2965 // Each slot span for this size is 16384 bytes, or 1 partition page and 2
2966 // slots.
2967 void* ptr1 = allocator.root()->Alloc(size, type_name);
2968 void* ptr2 = allocator.root()->Alloc(size, type_name);
2969 void* ptr3 = allocator.root()->Alloc(size, type_name);
2970 void* ptr4 = allocator.root()->Alloc(size, type_name);
2971 void* ptr5 = allocator.root()->Alloc(size, type_name);
2972 void* ptr6 = allocator.root()->Alloc(size, type_name);
2973
2974 SlotSpanMetadata<internal::ThreadSafe>* slot_span1 =
2975 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
2976 allocator.root()->ObjectToSlotStart(ptr1));
2977 SlotSpanMetadata<internal::ThreadSafe>* slot_span2 =
2978 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
2979 allocator.root()->ObjectToSlotStart(ptr3));
2980 SlotSpanMetadata<internal::ThreadSafe>* slot_span3 =
2981 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
2982 allocator.root()->ObjectToSlotStart(ptr6));
2983 EXPECT_NE(slot_span1, slot_span2);
2984 EXPECT_NE(slot_span2, slot_span3);
2985 PartitionBucket<internal::ThreadSafe>* bucket = slot_span1->bucket;
2986 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
2987
2988 // Free up the 2nd slot in each slot span.
2989 // This leaves the active list containing 3 slot spans, each with 1 used and 1
2990 // free slot. The active slot span will be the one containing ptr1.
2991 allocator.root()->Free(ptr6);
2992 allocator.root()->Free(ptr4);
2993 allocator.root()->Free(ptr2);
2994 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
2995
2996 // Empty the middle slot span in the active list.
2997 allocator.root()->Free(ptr3);
2998 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
2999
3000 // Empty the first slot span in the active list -- also the current slot span.
3001 allocator.root()->Free(ptr1);
3002
3003 // A good choice here is to re-fill the third slot span since the first two
3004 // are empty. We used to fail that.
3005 void* ptr7 = allocator.root()->Alloc(size, type_name);
3006 PA_EXPECT_PTR_EQ(ptr6, ptr7);
3007 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
3008
3009 allocator.root()->Free(ptr5);
3010 allocator.root()->Free(ptr7);
3011 }
3012
3013 // Tests the API to purge discardable memory.
TEST_P(PartitionAllocTest, PurgeDiscardableSecondPage) {
3015 // Free the second of two 4096 byte allocations and then purge.
3016 void* ptr1 = allocator.root()->Alloc(
3017 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3018 char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
3019 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3020 allocator.root()->Free(ptr2);
3021 SlotSpanMetadata<internal::ThreadSafe>* slot_span =
3022 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
3023 allocator.root()->ObjectToSlotStart(ptr1));
3024 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
3025 {
3026 MockPartitionStatsDumper dumper;
3027 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3028 &dumper);
3029 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3030
3031 const PartitionBucketMemoryStats* stats =
3032 dumper.GetBucketStats(SystemPageSize());
3033 EXPECT_TRUE(stats);
3034 EXPECT_TRUE(stats->is_valid);
3035 EXPECT_EQ(0u, stats->decommittable_bytes);
3036 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3037 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3038 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3039 }
3040 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
3041 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3042 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
3043 EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
3044
3045 allocator.root()->Free(ptr1);
3046 }
3047
TEST_P(PartitionAllocTest, PurgeDiscardableFirstPage) {
3049 // Free the first of two 4096 byte allocations and then purge.
3050 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3051 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3052 void* ptr2 = allocator.root()->Alloc(
3053 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3054 allocator.root()->Free(ptr1);
3055 {
3056 MockPartitionStatsDumper dumper;
3057 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3058 &dumper);
3059 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3060
3061 const PartitionBucketMemoryStats* stats =
3062 dumper.GetBucketStats(SystemPageSize());
3063 EXPECT_TRUE(stats);
3064 EXPECT_TRUE(stats->is_valid);
3065 EXPECT_EQ(0u, stats->decommittable_bytes);
3066 #if BUILDFLAG(IS_WIN)
3067 EXPECT_EQ(0u, stats->discardable_bytes);
3068 #else
3069 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3070 #endif
3071 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3072 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3073 }
3074 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3075 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3076 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
3077
3078 allocator.root()->Free(ptr2);
3079 }
3080
TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAlloc) {
3082 const size_t requested_size = 2.5 * SystemPageSize();
3083 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3084 requested_size - ExtraAllocSize(allocator), type_name));
3085 void* ptr2 = allocator.root()->Alloc(
3086 requested_size - ExtraAllocSize(allocator), type_name);
3087 void* ptr3 = allocator.root()->Alloc(
3088 requested_size - ExtraAllocSize(allocator), type_name);
3089 void* ptr4 = allocator.root()->Alloc(
3090 requested_size - ExtraAllocSize(allocator), type_name);
3091 memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
3092 memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
3093 allocator.root()->Free(ptr1);
3094 allocator.root()->Free(ptr2);
3095 {
3096 MockPartitionStatsDumper dumper;
3097 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3098 &dumper);
3099 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3100
3101 const PartitionBucketMemoryStats* stats =
3102 dumper.GetBucketStats(requested_size);
3103 EXPECT_TRUE(stats);
3104 EXPECT_TRUE(stats->is_valid);
3105 EXPECT_EQ(0u, stats->decommittable_bytes);
3106 #if BUILDFLAG(IS_WIN)
3107 EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
3108 #else
3109 EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
3110 #endif
3111 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3112 EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
3113 }
3114 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3115 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3116 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3117 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3118 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
3119 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3120 // Except for Windows, the first page is discardable because the freelist
3121 // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
3122 // checks for Linux and ChromeOS, not for Windows.
3123 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
3124 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3125 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3126 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3127 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
3128
3129 allocator.root()->Free(ptr3);
3130 allocator.root()->Free(ptr4);
3131 }
3132
TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAllocOnSlotBoundary) {
3134 const size_t requested_size = 2.5 * SystemPageSize();
3135 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3136 requested_size - ExtraAllocSize(allocator), type_name));
3137 void* ptr2 = allocator.root()->Alloc(
3138 requested_size - ExtraAllocSize(allocator), type_name);
3139 void* ptr3 = allocator.root()->Alloc(
3140 requested_size - ExtraAllocSize(allocator), type_name);
3141 void* ptr4 = allocator.root()->Alloc(
3142 requested_size - ExtraAllocSize(allocator), type_name);
3143 memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
3144 memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
3145 allocator.root()->Free(ptr2);
3146 allocator.root()->Free(ptr1);
3147 {
3148 MockPartitionStatsDumper dumper;
3149 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3150 &dumper);
3151 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3152
3153 const PartitionBucketMemoryStats* stats =
3154 dumper.GetBucketStats(requested_size);
3155 EXPECT_TRUE(stats);
3156 EXPECT_TRUE(stats->is_valid);
3157 EXPECT_EQ(0u, stats->decommittable_bytes);
3158 #if BUILDFLAG(IS_WIN)
3159 EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
3160 #else
3161 EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
3162 #endif
3163 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3164 EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
3165 }
3166 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3167 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3168 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3169 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3170 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
3171 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3172 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3173 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3174 // Except for Windows, the third page is discardable because the freelist
3175 // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
3176 // checks for Linux and ChromeOS, not for Windows.
3177 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
3178 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3179 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
3180
3181 allocator.root()->Free(ptr3);
3182 allocator.root()->Free(ptr4);
3183 }
3184
TEST_P(PartitionAllocTest, PurgeDiscardableManyPages) {
  // On systems with large pages, use fewer pages because:
  // 1) There must be a bucket for kFirstAllocPages * SystemPageSize(), and
  // 2) On low-end systems, using too many large pages can OOM during the test.
3189 const bool kHasLargePages = SystemPageSize() > 4096;
3190 const size_t kFirstAllocPages = kHasLargePages ? 32 : 64;
3191 const size_t kSecondAllocPages = kHasLargePages ? 31 : 61;
3192
3193 // Detect case (1) from above.
3194 PA_DCHECK(kFirstAllocPages * SystemPageSize() < (1UL << kMaxBucketedOrder));
3195
3196 const size_t kDeltaPages = kFirstAllocPages - kSecondAllocPages;
3197
3198 {
3199 ScopedPageAllocation p(allocator, kFirstAllocPages);
3200 p.TouchAllPages();
3201 }
3202
3203 ScopedPageAllocation p(allocator, kSecondAllocPages);
3204
3205 MockPartitionStatsDumper dumper;
3206 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3207 &dumper);
3208 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3209
3210 const PartitionBucketMemoryStats* stats =
3211 dumper.GetBucketStats(kFirstAllocPages * SystemPageSize());
3212 EXPECT_TRUE(stats);
3213 EXPECT_TRUE(stats->is_valid);
3214 EXPECT_EQ(0u, stats->decommittable_bytes);
3215 EXPECT_EQ(kDeltaPages * SystemPageSize(), stats->discardable_bytes);
3216 EXPECT_EQ(kSecondAllocPages * SystemPageSize(), stats->active_bytes);
3217 EXPECT_EQ(kFirstAllocPages * SystemPageSize(), stats->resident_bytes);
3218
3219 for (size_t i = 0; i < kFirstAllocPages; i++) {
3220 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
3221 }
3222
3223 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3224
3225 for (size_t i = 0; i < kSecondAllocPages; i++) {
3226 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
3227 }
3228 for (size_t i = kSecondAllocPages; i < kFirstAllocPages; i++) {
3229 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), false);
3230 }
3231 }
3232
TEST_P(PartitionAllocTest, PurgeDiscardableWithFreeListRewrite) {
3234 // This sub-test tests truncation of the provisioned slots in a trickier
3235 // case where the freelist is rewritten.
3236 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3237 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3238 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3239 void* ptr2 = allocator.root()->Alloc(
3240 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3241 void* ptr3 = allocator.root()->Alloc(
3242 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3243 void* ptr4 = allocator.root()->Alloc(
3244 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3245 ptr1[0] = 'A';
3246 ptr1[SystemPageSize()] = 'A';
3247 ptr1[SystemPageSize() * 2] = 'A';
3248 ptr1[SystemPageSize() * 3] = 'A';
3249 SlotSpanMetadata<internal::ThreadSafe>* slot_span =
3250 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
3251 allocator.root()->ObjectToSlotStart(ptr1));
3252 allocator.root()->Free(ptr2);
3253 allocator.root()->Free(ptr4);
3254 allocator.root()->Free(ptr1);
3255 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
3256
3257 {
3258 MockPartitionStatsDumper dumper;
3259 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3260 &dumper);
3261 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3262
3263 const PartitionBucketMemoryStats* stats =
3264 dumper.GetBucketStats(SystemPageSize());
3265 EXPECT_TRUE(stats);
3266 EXPECT_TRUE(stats->is_valid);
3267 EXPECT_EQ(0u, stats->decommittable_bytes);
3268 #if BUILDFLAG(IS_WIN)
3269 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3270 #else
3271 EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
3272 #endif
3273 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3274 EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
3275 }
3276 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3277 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3278 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3279 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3280 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3281 EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
3282 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3283 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3284 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3285 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3286
3287 // Let's check we didn't brick the freelist.
3288 void* ptr1b = allocator.root()->Alloc(
3289 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3290 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3291 void* ptr2b = allocator.root()->Alloc(
3292 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3293 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3294 EXPECT_FALSE(slot_span->get_freelist_head());
3295
3296 allocator.root()->Free(ptr1);
3297 allocator.root()->Free(ptr2);
3298 allocator.root()->Free(ptr3);
3299 }
3300
TEST_P(PartitionAllocTest, PurgeDiscardableDoubleTruncateFreeList) {
3302 // This sub-test is similar, but tests a double-truncation.
3303 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3304 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3305 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3306 void* ptr2 = allocator.root()->Alloc(
3307 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3308 void* ptr3 = allocator.root()->Alloc(
3309 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3310 void* ptr4 = allocator.root()->Alloc(
3311 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3312 ptr1[0] = 'A';
3313 ptr1[SystemPageSize()] = 'A';
3314 ptr1[SystemPageSize() * 2] = 'A';
3315 ptr1[SystemPageSize() * 3] = 'A';
3316 SlotSpanMetadata<internal::ThreadSafe>* slot_span =
3317 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
3318 allocator.root()->ObjectToSlotStart(ptr1));
3319 allocator.root()->Free(ptr4);
3320 allocator.root()->Free(ptr3);
3321 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
3322
3323 {
3324 MockPartitionStatsDumper dumper;
3325 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3326 &dumper);
3327 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3328
3329 const PartitionBucketMemoryStats* stats =
3330 dumper.GetBucketStats(SystemPageSize());
3331 EXPECT_TRUE(stats);
3332 EXPECT_TRUE(stats->is_valid);
3333 EXPECT_EQ(0u, stats->decommittable_bytes);
3334 EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
3335 EXPECT_EQ(2 * SystemPageSize(), stats->active_bytes);
3336 EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
3337 }
3338 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3339 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3340 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3341 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3342 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3343 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
3344 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3345 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3346 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
3347 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3348
3349 EXPECT_FALSE(slot_span->get_freelist_head());
3350
3351 allocator.root()->Free(ptr1);
3352 allocator.root()->Free(ptr2);
3353 }
3354
TEST_P(PartitionAllocTest, PurgeDiscardableSmallSlotsWithTruncate) {
3356 size_t requested_size = 0.5 * SystemPageSize();
3357 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3358 requested_size - ExtraAllocSize(allocator), type_name));
3359 void* ptr2 = allocator.root()->Alloc(
3360 requested_size - ExtraAllocSize(allocator), type_name);
3361 void* ptr3 = allocator.root()->Alloc(
3362 requested_size - ExtraAllocSize(allocator), type_name);
3363 void* ptr4 = allocator.root()->Alloc(
3364 requested_size - ExtraAllocSize(allocator), type_name);
3365 allocator.root()->Free(ptr3);
3366 allocator.root()->Free(ptr4);
3367 SlotSpanMetadata<internal::ThreadSafe>* slot_span =
3368 SlotSpanMetadata<internal::ThreadSafe>::FromSlotStart(
3369 allocator.root()->ObjectToSlotStart(ptr1));
3370 EXPECT_EQ(4u, slot_span->num_unprovisioned_slots);
3371 {
3372 MockPartitionStatsDumper dumper;
3373 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3374 &dumper);
3375 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3376
3377 const PartitionBucketMemoryStats* stats =
3378 dumper.GetBucketStats(requested_size);
3379 EXPECT_TRUE(stats);
3380 EXPECT_TRUE(stats->is_valid);
3381 EXPECT_EQ(0u, stats->decommittable_bytes);
3382 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3383 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3384 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3385 }
3386 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3387 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3388 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3389 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3390 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3391 EXPECT_EQ(6u, slot_span->num_unprovisioned_slots);
3392
3393 allocator.root()->Free(ptr1);
3394 allocator.root()->Free(ptr2);
3395 }
3396
TEST_P(PartitionAllocTest, ActiveListMaintenance) {
3398 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
3399 size_t real_size = size + ExtraAllocSize(allocator);
3400 size_t bucket_index =
3401 allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
3402 PartitionRoot<ThreadSafe>::Bucket* bucket =
3403 &allocator.root()->buckets[bucket_index];
3404 ASSERT_EQ(bucket->slot_size, real_size);
3405 size_t slots_per_span = bucket->num_system_pages_per_slot_span;
3406
3407 // Make 10 full slot spans.
3408 constexpr int kSpans = 10;
3409 std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
3410 for (int span_index = 0; span_index < kSpans; span_index++) {
3411 for (size_t i = 0; i < slots_per_span; i++) {
3412 allocated_memory_spans[span_index].push_back(
3413 allocator.root()->Alloc(size, ""));
3414 }
3415 }
3416
3417 // Free one entry in the middle span, creating a partial slot span.
3418 constexpr size_t kSpanIndex = 5;
3419 allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
3420 allocated_memory_spans[kSpanIndex].pop_back();
3421
3422 // Empty the last slot span.
3423 for (void* ptr : allocated_memory_spans[kSpans - 1]) {
3424 allocator.root()->Free(ptr);
3425 }
3426 allocated_memory_spans.pop_back();
3427
3428 // The active list now is:
3429 // Partial -> Empty -> Full -> Full -> ... -> Full
3430 bucket->MaintainActiveList();
3431
3432 // Only one entry in the active list.
3433 ASSERT_NE(bucket->active_slot_spans_head,
3434 SlotSpanMetadata<ThreadSafe>::get_sentinel_slot_span());
3435 EXPECT_FALSE(bucket->active_slot_spans_head->next_slot_span);
3436
3437 // The empty list has 1 entry.
3438 ASSERT_NE(bucket->empty_slot_spans_head,
3439 SlotSpanMetadata<ThreadSafe>::get_sentinel_slot_span());
3440 EXPECT_FALSE(bucket->empty_slot_spans_head->next_slot_span);
3441
3442 // The rest are full slot spans.
3443 EXPECT_EQ(8u, bucket->num_full_slot_spans);
3444
3445 // Free all memory.
3446 for (const auto& span : allocated_memory_spans) {
3447 for (void* ptr : span) {
3448 allocator.root()->Free(ptr);
3449 }
3450 }
3451 }
3452
TEST_P(PartitionAllocTest, ReallocMovesCookie) {
3454 // Resize so as to be sure to hit a "resize in place" case, and ensure that
3455 // use of the entire result is compatible with the debug mode's cookie, even
3456 // when the bucket size is large enough to span more than one partition page
3457 // and we can track the "raw" size. See https://crbug.com/709271
3458 static const size_t kSize = MaxRegularSlotSpanSize();
3459 void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
3460 EXPECT_TRUE(ptr);
3461
3462 memset(ptr, 0xbd, kSize + 1);
3463 ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
3464 EXPECT_TRUE(ptr);
3465
3466 memset(ptr, 0xbd, kSize + 2);
3467 allocator.root()->Free(ptr);
3468 }
3469
TEST_P(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
3471 // For crbug.com/781473
3472 static constexpr size_t kSize = 264;
3473 void* ptr = allocator.root()->Alloc(kSize, type_name);
3474 EXPECT_TRUE(ptr);
3475
3476 ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
3477 EXPECT_TRUE(ptr);
3478
3479 allocator.root()->Free(ptr);
3480 }
3481
TEST_P(PartitionAllocTest, ZeroFill) {
3483 static constexpr size_t kAllZerosSentinel =
3484 std::numeric_limits<size_t>::max();
3485 for (size_t size : kTestSizes) {
3486 char* p = static_cast<char*>(
3487 allocator.root()->AllocWithFlags(AllocFlags::kZeroFill, size, nullptr));
3488 size_t non_zero_position = kAllZerosSentinel;
3489 for (size_t i = 0; i < size; ++i) {
3490 if (0 != p[i]) {
3491 non_zero_position = i;
3492 break;
3493 }
3494 }
3495 EXPECT_EQ(kAllZerosSentinel, non_zero_position)
3496 << "test allocation size: " << size;
3497 allocator.root()->Free(p);
3498 }
3499
3500 for (int i = 0; i < 10; ++i) {
3501 SCOPED_TRACE(i);
3502 AllocateRandomly(allocator.root(), 250, AllocFlags::kZeroFill);
3503 }
3504 }
3505
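// Illustrative sketch of the zero-fill flag exercised above: a block allocated
// with AllocFlags::kZeroFill reads back as all zeros without an explicit
// memset. The wrapper name is hypothetical and is not used by the tests.
namespace {
[[maybe_unused]] void* AllocZeroedForIllustration(
    PartitionRoot<ThreadSafe>* root,
    size_t size) {
  return root->AllocWithFlags(AllocFlags::kZeroFill, size, "zeroed example");
}
}  // namespace
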
TEST_P(PartitionAllocTest, Bug_897585) {
3507 // Need sizes big enough to be direct mapped and a delta small enough to
3508 // allow re-use of the slot span when cookied. These numbers fall out of the
3509 // test case in the indicated bug.
3510 size_t kInitialSize = 983050;
3511 size_t kDesiredSize = 983100;
3512 ASSERT_GT(kInitialSize, kMaxBucketed);
3513 ASSERT_GT(kDesiredSize, kMaxBucketed);
3514 void* ptr = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
3515 kInitialSize, nullptr);
3516 ASSERT_NE(nullptr, ptr);
3517 ptr = allocator.root()->ReallocWithFlags(AllocFlags::kReturnNull, ptr,
3518 kDesiredSize, nullptr);
3519 ASSERT_NE(nullptr, ptr);
3520 memset(ptr, 0xbd, kDesiredSize);
3521 allocator.root()->Free(ptr);
3522 }
3523
TEST_P(PartitionAllocTest, OverrideHooks) {
3525 constexpr size_t kOverriddenSize = 1234;
3526 constexpr const char* kOverriddenType = "Overridden type";
3527 constexpr unsigned char kOverriddenChar = 'A';
3528
3529 // Marked static so that we can use them in non-capturing lambdas below.
3530 // (Non-capturing lambdas convert directly to function pointers.)
3531 static volatile bool free_called = false;
3532 static void* overridden_allocation = nullptr;
3533 overridden_allocation = malloc(kOverriddenSize);
3534 memset(overridden_allocation, kOverriddenChar, kOverriddenSize);
3535
3536 PartitionAllocHooks::SetOverrideHooks(
3537 [](void** out, unsigned int flags, size_t size,
3538 const char* type_name) -> bool {
3539 if (size == kOverriddenSize && type_name == kOverriddenType) {
3540 *out = overridden_allocation;
3541 return true;
3542 }
3543 return false;
3544 },
3545 [](void* address) -> bool {
3546 if (address == overridden_allocation) {
3547 free_called = true;
3548 return true;
3549 }
3550 return false;
3551 },
3552 [](size_t* out, void* address) -> bool {
3553 if (address == overridden_allocation) {
3554 *out = kOverriddenSize;
3555 return true;
3556 }
3557 return false;
3558 });
3559
3560 void* ptr = allocator.root()->AllocWithFlags(
3561 AllocFlags::kReturnNull, kOverriddenSize, kOverriddenType);
3562 ASSERT_EQ(ptr, overridden_allocation);
3563
3564 allocator.root()->Free(ptr);
3565 EXPECT_TRUE(free_called);
3566
3567 // overridden_allocation has not actually been freed so we can now immediately
3568 // realloc it.
3569 free_called = false;
3570 ptr = allocator.root()->ReallocWithFlags(AllocFlags::kReturnNull, ptr, 1,
3571 nullptr);
3572 ASSERT_NE(ptr, nullptr);
3573 EXPECT_NE(ptr, overridden_allocation);
3574 EXPECT_TRUE(free_called);
3575 EXPECT_EQ(*(char*)ptr, kOverriddenChar);
3576 allocator.root()->Free(ptr);
3577
3578 PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
3579 free(overridden_allocation);
3580 }
3581
TEST_P(PartitionAllocTest, Alignment) {
3583 std::vector<void*> allocated_ptrs;
3584
3585 for (size_t size = 1; size <= PartitionPageSize(); size <<= 1) {
3586 if (size <= ExtraAllocSize(allocator)) {
3587 continue;
3588 }
3589 size_t requested_size = size - ExtraAllocSize(allocator);
3590
3591 // All allocations which are not direct-mapped occupy contiguous slots of a
3592 // span, starting on a page boundary. This means that allocations are first
3593 // rounded up to the nearest bucket size, then have an address of the form:
3594 // (partition-page-aligned address) + i * bucket_size.
3595 //
3596 // All powers of two are bucket sizes, meaning that all power of two
3597 // allocations smaller than a page will be aligned on the allocation size.
3598 size_t expected_alignment = size;
3599 for (int index = 0; index < 3; index++) {
3600 void* ptr = allocator.root()->Alloc(requested_size, "");
3601 allocated_ptrs.push_back(ptr);
3602 EXPECT_EQ(0u,
3603 allocator.root()->ObjectToSlotStart(ptr) % expected_alignment)
3604 << (index + 1) << "-th allocation of size=" << size;
3605 }
3606 }
3607
3608 for (void* ptr : allocated_ptrs) {
3609 allocator.root()->Free(ptr);
3610 }
3611 }
3612
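// Worked instance of the alignment argument in the test above, with
// illustrative numbers only: assuming a partition-page-aligned span base that
// is a multiple of 4096, the i-th 256-byte slot starts at base + i * 256 and
// is therefore 256-byte aligned.
static_assert((4096 + 3 * 256) % 256 == 0,
              "power-of-two slots are aligned on their size");
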
TEST_P(PartitionAllocTest, FundamentalAlignment) {
3614 // See the test above for details. Essentially, checking the bucket size is
3615 // sufficient to ensure that alignment will always be respected, as long as
3616 // the fundamental alignment is <= 16 bytes.
3617 size_t fundamental_alignment = kAlignment;
3618 for (size_t size = 0; size < SystemPageSize(); size++) {
3619 // Allocate several pointers, as the first one in use in a size class will
3620 // be aligned on a page boundary.
3621 void* ptr = allocator.root()->Alloc(size, "");
3622 void* ptr2 = allocator.root()->Alloc(size, "");
3623 void* ptr3 = allocator.root()->Alloc(size, "");
3624
3625 EXPECT_EQ(UntagPtr(ptr) % fundamental_alignment, 0u);
3626 EXPECT_EQ(UntagPtr(ptr2) % fundamental_alignment, 0u);
3627 EXPECT_EQ(UntagPtr(ptr3) % fundamental_alignment, 0u);
3628
3629 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
3630 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
3631 // The capacity(C) is slot size - ExtraAllocSize(allocator).
3632 // Since slot size is multiples of kAlignment,
3633 // C % kAlignment == (slot_size - ExtraAllocSize(allocator)) % kAlignment.
3634 // C % kAlignment == (-ExtraAllocSize(allocator)) % kAlignment.
3635 // Since kCookieSize is a multiple of kAlignment,
3636 // C % kAlignment == (-kInSlotRefCountBufferSize) % kAlignment
3637 // == (kAlignment - kInSlotRefCountBufferSize) % kAlignment.
3638 EXPECT_EQ(
3639 allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
3640 fundamental_alignment,
3641 UseBRPPool() ? fundamental_alignment - kInSlotRefCountBufferSize : 0);
3642 #else
3643 EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
3644 fundamental_alignment,
3645 -ExtraAllocSize(allocator) % fundamental_alignment);
3646 #endif
3647
3648 allocator.root()->Free(ptr);
3649 allocator.root()->Free(ptr2);
3650 allocator.root()->Free(ptr3);
3651 }
3652 }
3653
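// Worked instance of the capacity arithmetic discussed in FundamentalAlignment
// above, with assumed sizes only (kAlignment taken as 16 and the in-slot
// ref-count buffer taken as 8 bytes): a 64-byte slot then has a capacity of
// 64 - 8 = 56 bytes, and 56 % 16 == 16 - 8.
static_assert((64 - 8) % 16 == 16 - 8,
              "capacity modulo alignment example with assumed sizes");
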
void VerifyAlignment(PartitionRoot<ThreadSafe>* root,
                     size_t size,
                     size_t alignment) {
3657 std::vector<void*> allocated_ptrs;
3658
3659 for (int index = 0; index < 3; index++) {
3660 void* ptr = root->AlignedAllocWithFlags(0, alignment, size);
3661 ASSERT_TRUE(ptr);
3662 allocated_ptrs.push_back(ptr);
3663 EXPECT_EQ(0ull, UntagPtr(ptr) % alignment)
3664 << (index + 1) << "-th allocation of size=" << size
3665 << ", alignment=" << alignment;
3666 }
3667
3668 for (void* ptr : allocated_ptrs) {
3669 PartitionRoot<ThreadSafe>::Free(ptr);
3670 }
3671 }
3672
TEST_P(PartitionAllocTest, AlignedAllocations) {
3674 size_t alloc_sizes[] = {1,
3675 10,
3676 100,
3677 1000,
3678 10000,
3679 60000,
3680 70000,
3681 130000,
3682 500000,
3683 900000,
3684 kMaxBucketed + 1,
3685 2 * kMaxBucketed,
3686 kSuperPageSize - 2 * PartitionPageSize(),
3687 4 * kMaxBucketed};
3688 for (size_t alloc_size : alloc_sizes) {
3689 for (size_t alignment = 1; alignment <= kMaxSupportedAlignment;
3690 alignment <<= 1) {
3691 VerifyAlignment(aligned_allocator.root(), alloc_size, alignment);
3692
3693 // Verify alignment on the regular allocator only when BRP is off, or when
3694 // it's on in the "previous slot" mode. See the comment in SetUp().
3695 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
3696 BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
3697 VerifyAlignment(allocator.root(), alloc_size, alignment);
3698 #endif
3699 }
3700 }
3701 }
3702
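// Minimal usage sketch for the aligned-allocation API verified above: request
// a 64-byte-aligned block, assuming 64 <= kMaxSupportedAlignment for the root.
// The helper name is hypothetical and is not used by the tests.
namespace {
[[maybe_unused]] void* AlignedAlloc64ForIllustration(
    PartitionRoot<ThreadSafe>* root,
    size_t size) {
  // The returned address is a multiple of 64, as VerifyAlignment checks above.
  return root->AlignedAllocWithFlags(0, 64, size);
}
}  // namespace
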
3703 // Test that the optimized `GetSlotNumber` implementation produces valid
3704 // results.
TEST_P(PartitionAllocTest, OptimizedGetSlotNumber) {
3706 for (size_t i = 0; i < kNumBuckets; ++i) {
3707 auto& bucket = allocator.root()->buckets[i];
3708 if (SizeToIndex(bucket.slot_size) != i) {
3709 continue;
3710 }
3711 for (size_t slot = 0, offset = 0; slot < bucket.get_slots_per_span();
3712 ++slot, offset += bucket.slot_size) {
3713 EXPECT_EQ(slot, bucket.GetSlotNumber(offset));
3714 EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size / 2));
3715 EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size - 1));
3716 }
3717 }
3718 }
3719
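// Reference computation that the optimized GetSlotNumber() above is checked
// against: a plain division of the offset within the slot span by the slot
// size. Sketch only; this helper is not part of the allocator.
namespace {
[[maybe_unused]] size_t ReferenceGetSlotNumber(size_t offset_in_span,
                                               size_t slot_size) {
  return offset_in_span / slot_size;
}
}  // namespace
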
TEST_P(PartitionAllocTest, GetUsableSizeNull) {
3721 EXPECT_EQ(0ULL, PartitionRoot<ThreadSafe>::GetUsableSize(nullptr));
3722 }
3723
TEST_P(PartitionAllocTest, GetUsableSize) {
3725 size_t delta = SystemPageSize() + 1;
3726 for (size_t size = 1; size <= kMinDirectMappedDownsize; size += delta) {
3727 void* ptr = allocator.root()->Alloc(size, "");
3728 EXPECT_TRUE(ptr);
3729 size_t usable_size = PartitionRoot<ThreadSafe>::GetUsableSize(ptr);
3730 size_t usable_size_with_hack =
3731 PartitionRoot<ThreadSafe>::GetUsableSizeWithMac11MallocSizeHack(ptr);
3732 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
3733 if (size != 32)
3734 #endif
3735 EXPECT_EQ(usable_size_with_hack, usable_size);
3736 EXPECT_LE(size, usable_size);
3737 memset(ptr, 0xDE, usable_size);
    // Should not crash when freeing the ptr.
3739 allocator.root()->Free(ptr);
3740 }
3741 }
3742
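// Usage sketch for GetUsableSize(), matching the property exercised above: the
// usable size is at least the requested size, and the whole usable region may
// be written. Hypothetical helper, not used by the tests.
namespace {
[[maybe_unused]] void FillAllUsableBytes(void* object, unsigned char value) {
  size_t usable_size = PartitionRoot<ThreadSafe>::GetUsableSize(object);
  memset(object, value, usable_size);
}
}  // namespace
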
3743 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
TEST_P(PartitionAllocTest, GetUsableSizeWithMac11MallocSizeHack) {
3745 allocator.root()->EnableMac11MallocSizeHackForTesting();
3746 size_t size = internal::kMac11MallocSizeHackRequestedSize;
3747 void* ptr = allocator.root()->Alloc(size, "");
3748 size_t usable_size = PartitionRoot<ThreadSafe>::GetUsableSize(ptr);
3749 size_t usable_size_with_hack =
3750 PartitionRoot<ThreadSafe>::GetUsableSizeWithMac11MallocSizeHack(ptr);
3751 EXPECT_EQ(usable_size, internal::kMac11MallocSizeHackUsableSize);
3752 EXPECT_EQ(usable_size_with_hack, size);
3753
3754 allocator.root()->Free(ptr);
3755 }
3756 #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
3757
TEST_P(PartitionAllocTest, Bookkeeping) {
3759 auto& root = *allocator.root();
3760
3761 EXPECT_EQ(0U, root.total_size_of_committed_pages);
3762 EXPECT_EQ(0U, root.max_size_of_committed_pages);
3763 EXPECT_EQ(0U, root.get_total_size_of_allocated_bytes());
3764 EXPECT_EQ(0U, root.get_max_size_of_allocated_bytes());
3765 EXPECT_EQ(0U, root.total_size_of_super_pages);
3766 size_t small_size = 1000;
3767
3768 // A full slot span of size 1 partition page is committed.
3769 void* ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
3770 // Lazy commit commits only needed pages.
3771 size_t expected_committed_size =
3772 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
3773 size_t expected_super_pages_size = kSuperPageSize;
3774 size_t expected_max_committed_size = expected_committed_size;
3775 size_t bucket_index = SizeToIndex(small_size - ExtraAllocSize(allocator));
3776 PartitionBucket<internal::ThreadSafe>* bucket = &root.buckets[bucket_index];
3777 size_t expected_total_allocated_size = bucket->slot_size;
3778 size_t expected_max_allocated_size = expected_total_allocated_size;
3779
3780 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3781 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3782 EXPECT_EQ(expected_total_allocated_size,
3783 root.get_total_size_of_allocated_bytes());
3784 EXPECT_EQ(expected_max_allocated_size,
3785 root.get_max_size_of_allocated_bytes());
3786 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3787
3788 // Freeing memory doesn't result in decommitting pages right away.
3789 root.Free(ptr);
3790 expected_total_allocated_size = 0U;
3791 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3792 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3793 EXPECT_EQ(expected_total_allocated_size,
3794 root.get_total_size_of_allocated_bytes());
3795 EXPECT_EQ(expected_max_allocated_size,
3796 root.get_max_size_of_allocated_bytes());
3797 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3798
3799 // Allocating the same size lands it in the same slot span.
3800 ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
3801 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3802 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3803 EXPECT_EQ(expected_max_allocated_size,
3804 root.get_max_size_of_allocated_bytes());
3805 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3806
3807 // Freeing memory doesn't result in decommitting pages right away.
3808 root.Free(ptr);
3809 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3810 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3811 EXPECT_EQ(expected_max_allocated_size,
3812 root.get_max_size_of_allocated_bytes());
3813 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3814
3815 // Allocating another size commits another slot span.
3816 ptr = root.Alloc(2 * small_size - ExtraAllocSize(allocator), type_name);
3817 expected_committed_size +=
3818 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
3819 expected_max_committed_size =
3820 std::max(expected_max_committed_size, expected_committed_size);
3821 expected_max_allocated_size =
3822 std::max(expected_max_allocated_size, static_cast<size_t>(2048));
3823 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3824 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3825 EXPECT_EQ(expected_max_allocated_size,
3826 root.get_max_size_of_allocated_bytes());
3827 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3828
3829 // Freeing memory doesn't result in decommitting pages right away.
3830 root.Free(ptr);
3831 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3832 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3833 EXPECT_EQ(expected_max_allocated_size,
3834 root.get_max_size_of_allocated_bytes());
3835 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3836
3837 // Single-slot slot spans...
3838 //
3839 // When the system page size is larger than 4KiB, we don't necessarily have
3840 // enough space in the superpage to store two of the largest bucketed
3841 // allocations, particularly when we reserve extra space for e.g. bitmaps.
3842 // To avoid this, we use something just below kMaxBucketed.
3843 size_t big_size = kMaxBucketed * 4 / 5 - SystemPageSize();
3844
3845 ASSERT_GT(big_size, MaxRegularSlotSpanSize());
3846 ASSERT_LE(big_size, kMaxBucketed);
3847 bucket_index = SizeToIndex(big_size - ExtraAllocSize(allocator));
3848 bucket = &root.buckets[bucket_index];
3849 // Assert the allocation doesn't fill the entire span nor entire partition
3850 // page, to make the test more interesting.
3851 ASSERT_LT(big_size, bucket->get_bytes_per_span());
3852 ASSERT_NE(big_size % PartitionPageSize(), 0U);
3853 ptr = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
3854 expected_committed_size += bucket->get_bytes_per_span();
3855 expected_max_committed_size =
3856 std::max(expected_max_committed_size, expected_committed_size);
3857 expected_total_allocated_size += bucket->get_bytes_per_span();
3858 expected_max_allocated_size =
3859 std::max(expected_max_allocated_size, expected_total_allocated_size);
3860 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3861 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3862 EXPECT_EQ(expected_total_allocated_size,
3863 root.get_total_size_of_allocated_bytes());
3864 EXPECT_EQ(expected_max_allocated_size,
3865 root.get_max_size_of_allocated_bytes());
3866 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3867
3868 // Allocating 2nd time doesn't overflow the super page...
3869 void* ptr2 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
3870 expected_committed_size += bucket->get_bytes_per_span();
3871 expected_max_committed_size =
3872 std::max(expected_max_committed_size, expected_committed_size);
3873 expected_total_allocated_size += bucket->get_bytes_per_span();
3874 expected_max_allocated_size =
3875 std::max(expected_max_allocated_size, expected_total_allocated_size);
3876 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3877 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3878 EXPECT_EQ(expected_total_allocated_size,
3879 root.get_total_size_of_allocated_bytes());
3880 EXPECT_EQ(expected_max_allocated_size,
3881 root.get_max_size_of_allocated_bytes());
3882 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3883
3884 // ... but 3rd time does.
3885 void* ptr3 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
3886 expected_committed_size += bucket->get_bytes_per_span();
3887 expected_max_committed_size =
3888 std::max(expected_max_committed_size, expected_committed_size);
3889 expected_total_allocated_size += bucket->get_bytes_per_span();
3890 expected_max_allocated_size =
3891 std::max(expected_max_allocated_size, expected_total_allocated_size);
3892 expected_super_pages_size += kSuperPageSize;
3893 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3894 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3895 EXPECT_EQ(expected_total_allocated_size,
3896 root.get_total_size_of_allocated_bytes());
3897 EXPECT_EQ(expected_max_allocated_size,
3898 root.get_max_size_of_allocated_bytes());
3899 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3900
3901 // Freeing memory doesn't result in decommitting pages right away.
3902 root.Free(ptr);
3903 root.Free(ptr2);
3904 root.Free(ptr3);
3905 expected_total_allocated_size -= 3 * bucket->get_bytes_per_span();
3906 expected_max_allocated_size =
3907 std::max(expected_max_allocated_size, expected_total_allocated_size);
3908 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3909 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3910 EXPECT_EQ(expected_total_allocated_size,
3911 root.get_total_size_of_allocated_bytes());
3912 EXPECT_EQ(expected_max_allocated_size,
3913 root.get_max_size_of_allocated_bytes());
3914 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3915
3916 // Now everything should be decommitted. The reserved space for super pages
3917 // stays the same and will never go away (by design).
3918 root.PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3919 expected_committed_size = 0;
3920 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3921 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3922 EXPECT_EQ(expected_total_allocated_size,
3923 root.get_total_size_of_allocated_bytes());
3924 EXPECT_EQ(expected_max_allocated_size,
3925 root.get_max_size_of_allocated_bytes());
3926 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3927
3928 // None of the above should affect the direct map space.
3929 EXPECT_EQ(0U, root.total_size_of_direct_mapped_pages);
3930
3931 size_t huge_sizes[] = {
3932 kMaxBucketed + SystemPageSize(),
3933 kMaxBucketed + SystemPageSize() + 123,
3934 kSuperPageSize - PageAllocationGranularity(),
3935 kSuperPageSize - SystemPageSize() - PartitionPageSize(),
3936 kSuperPageSize - PartitionPageSize(),
3937 kSuperPageSize - SystemPageSize(),
3938 kSuperPageSize,
3939 kSuperPageSize + SystemPageSize(),
3940 kSuperPageSize + PartitionPageSize(),
3941 kSuperPageSize + SystemPageSize() + PartitionPageSize(),
3942 kSuperPageSize + PageAllocationGranularity(),
3943 kSuperPageSize + DirectMapAllocationGranularity(),
3944 };
3945 size_t alignments[] = {
3946 PartitionPageSize(),
3947 2 * PartitionPageSize(),
3948 kMaxSupportedAlignment / 2,
3949 kMaxSupportedAlignment,
3950 };
3951 for (size_t huge_size : huge_sizes) {
3952 ASSERT_GT(huge_size, kMaxBucketed);
3953 for (size_t alignment : alignments) {
3954 // For direct map, we commit only as many pages as needed.
3955 size_t aligned_size = partition_alloc::internal::base::bits::AlignUp(
3956 huge_size, SystemPageSize());
3957 ptr = root.AllocWithFlagsInternal(
3958 0, huge_size - ExtraAllocSize(allocator), alignment, type_name);
3959 expected_committed_size += aligned_size;
3960 expected_max_committed_size =
3961 std::max(expected_max_committed_size, expected_committed_size);
3962 expected_total_allocated_size += aligned_size;
3963 expected_max_allocated_size =
3964 std::max(expected_max_allocated_size, expected_total_allocated_size);
3965       // The total reserved size includes metadata and guard pages at the ends,
3966       // as well as padding for alignment. Both of these account for the first
3967       // partition page, so it is subtracted once to avoid double counting.
3968 size_t surrounding_pages_size =
3969 PartitionRoot<ThreadSafe>::GetDirectMapMetadataAndGuardPagesSize() +
3970 alignment - PartitionPageSize();
3971 size_t expected_direct_map_size =
3972 partition_alloc::internal::base::bits::AlignUp(
3973 aligned_size + surrounding_pages_size,
3974 DirectMapAllocationGranularity());
3975 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3976 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3977 EXPECT_EQ(expected_total_allocated_size,
3978 root.get_total_size_of_allocated_bytes());
3979 EXPECT_EQ(expected_max_allocated_size,
3980 root.get_max_size_of_allocated_bytes());
3981 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3982 EXPECT_EQ(expected_direct_map_size,
3983 root.total_size_of_direct_mapped_pages);
3984
3985       // Freeing memory in the direct map decommits pages right away. The address
3986 // space is released for re-use too.
3987 root.Free(ptr);
3988 expected_committed_size -= aligned_size;
3989 expected_direct_map_size = 0;
3990 expected_max_committed_size =
3991 std::max(expected_max_committed_size, expected_committed_size);
3992 expected_total_allocated_size -= aligned_size;
3993 expected_max_allocated_size =
3994 std::max(expected_max_allocated_size, expected_total_allocated_size);
3995 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3996 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3997 EXPECT_EQ(expected_total_allocated_size,
3998 root.get_total_size_of_allocated_bytes());
3999 EXPECT_EQ(expected_max_allocated_size,
4000 root.get_max_size_of_allocated_bytes());
4001 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4002 EXPECT_EQ(expected_direct_map_size,
4003 root.total_size_of_direct_mapped_pages);
4004 }
4005 }
4006 }
4007
4008 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
4009
4010 TEST_P(PartitionAllocTest, RefCountBasic) {
4011 if (!UseBRPPool()) {
4012 return;
4013 }
4014
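  // kQuarantined is internal::kQuarantinedByte repeated across a uint64_t: the
  // zap pattern written over freed-but-still-referenced allocations.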
4015 constexpr uint64_t kCookie = 0x1234567890ABCDEF;
4016 constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
4017
4018 size_t alloc_size = 64 - ExtraAllocSize(allocator);
4019 uint64_t* ptr1 =
4020 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4021 EXPECT_TRUE(ptr1);
4022
4023 *ptr1 = kCookie;
4024
4025 auto* ref_count =
4026 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
4027 EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
4028
4029 ref_count->Acquire();
4030 EXPECT_FALSE(ref_count->Release());
4031 EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
4032 EXPECT_EQ(*ptr1, kCookie);
4033
4034 ref_count->Acquire();
4035 EXPECT_FALSE(ref_count->IsAliveWithNoKnownRefs());
4036
4037 allocator.root()->Free(ptr1);
4038 // The allocation shouldn't be reclaimed, and its contents should be zapped.
4039 // Retag ptr1 to get its correct MTE tag.
4040 ptr1 = TagPtr(ptr1);
4041 EXPECT_NE(*ptr1, kCookie);
4042 EXPECT_EQ(*ptr1, kQuarantined);
4043
4044 // The allocator should not reuse the original slot since its reference count
4045 // doesn't equal zero.
4046 uint64_t* ptr2 =
4047 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4048 EXPECT_NE(ptr1, ptr2);
4049 allocator.root()->Free(ptr2);
4050
4051 // When the last reference is released, the slot should become reusable.
4052 // Retag ref_count because PartitionAlloc retags ptr to enforce quarantine.
4053 ref_count = TagPtr(ref_count);
4054 EXPECT_TRUE(ref_count->Release());
4055 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
4056 uint64_t* ptr3 =
4057 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4058 EXPECT_EQ(ptr1, ptr3);
4059 allocator.root()->Free(ptr3);
4060 }
4061
4062 void PartitionAllocTest::RunRefCountReallocSubtest(size_t orig_size,
4063 size_t new_size) {
4064 void* ptr1 = allocator.root()->Alloc(orig_size, type_name);
4065 EXPECT_TRUE(ptr1);
4066
4067 auto* ref_count1 =
4068 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
4069 EXPECT_TRUE(ref_count1->IsAliveWithNoKnownRefs());
4070
4071 ref_count1->Acquire();
4072 EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
4073
4074 void* ptr2 = allocator.root()->Realloc(ptr1, new_size, type_name);
4075 EXPECT_TRUE(ptr2);
4076
4077 // PartitionAlloc may retag memory areas on realloc (even if they
4078 // do not move), so recover the true tag here.
4079 ref_count1 = TagPtr(ref_count1);
4080
4081 // Re-query ref-count. It may have moved if Realloc changed the slot.
4082 auto* ref_count2 =
4083 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr2));
4084
4085 if (UntagPtr(ptr1) == UntagPtr(ptr2)) {
4086 // If the slot didn't change, ref-count should stay the same.
4087 EXPECT_EQ(ref_count1, ref_count2);
4088 EXPECT_FALSE(ref_count2->IsAliveWithNoKnownRefs());
4089
4090 EXPECT_FALSE(ref_count2->Release());
4091 } else {
4092     // If the allocation was moved to another slot, the old ref-count stays at
4093     // the same location in memory; it is no longer alive, but still holds a
4094     // reference. The new ref-count is alive, but has no references.
4095 EXPECT_NE(ref_count1, ref_count2);
4096 EXPECT_FALSE(ref_count1->IsAlive());
4097 EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
4098 EXPECT_TRUE(ref_count2->IsAliveWithNoKnownRefs());
4099
4100 EXPECT_TRUE(ref_count1->Release());
4101 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
4102 }
4103
4104 allocator.root()->Free(ptr2);
4105 }
4106
4107 TEST_P(PartitionAllocTest, RefCountRealloc) {
4108 if (!UseBRPPool()) {
4109 return;
4110 }
4111
4112 size_t alloc_sizes[] = {500, 5000, 50000, 400000};
4113
4114 for (size_t alloc_size : alloc_sizes) {
4115 alloc_size -= ExtraAllocSize(allocator);
4116 RunRefCountReallocSubtest(alloc_size, alloc_size - 9);
4117 RunRefCountReallocSubtest(alloc_size, alloc_size + 9);
4118 RunRefCountReallocSubtest(alloc_size, alloc_size * 2);
4119 RunRefCountReallocSubtest(alloc_size, alloc_size / 2);
4120 }
4121 }
4122
4123 int g_unretained_dangling_raw_ptr_detected_count = 0;
4124
4125 class UnretainedDanglingRawPtrTest : public PartitionAllocTest {
4126 public:
4127   void SetUp() override {
4128 PartitionAllocTest::SetUp();
4129 g_unretained_dangling_raw_ptr_detected_count = 0;
4130 old_detected_fn_ = partition_alloc::GetUnretainedDanglingRawPtrDetectedFn();
4131
4132 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
4133 &UnretainedDanglingRawPtrTest::DanglingRawPtrDetected);
4134 old_unretained_dangling_ptr_enabled_ =
4135 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(true);
4136 }
4137   void TearDown() override {
4138 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(old_detected_fn_);
4139 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(
4140 old_unretained_dangling_ptr_enabled_);
4141 PartitionAllocTest::TearDown();
4142 }
4143
4144 private:
4145   static void DanglingRawPtrDetected(uintptr_t) {
4146 g_unretained_dangling_raw_ptr_detected_count++;
4147 }
4148
4149 partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
4150 bool old_unretained_dangling_ptr_enabled_;
4151 };
4152
4153 INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
4154 UnretainedDanglingRawPtrTest,
4155 testing::ValuesIn(GetPartitionAllocTestParams()));
4156
4157 TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrNoReport) {
4158 if (!UseBRPPool()) {
4159 return;
4160 }
4161
4162 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4163 EXPECT_TRUE(ptr);
4164 auto* ref_count =
4165 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4166 ref_count->Acquire();
4167 EXPECT_TRUE(ref_count->IsAlive());
4168 // Allocation is still live, so calling ReportIfDangling() should not result
4169 // in any detections.
4170 ref_count->ReportIfDangling();
4171 EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 0);
4172 EXPECT_FALSE(ref_count->Release());
4173 allocator.root()->Free(ptr);
4174 }
4175
4176 TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrShouldReport) {
4177 if (!UseBRPPool()) {
4178 return;
4179 }
4180
4181 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4182 EXPECT_TRUE(ptr);
4183 auto* ref_count =
4184 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4185 ref_count->Acquire();
4186 EXPECT_TRUE(ref_count->IsAlive());
4187 allocator.root()->Free(ptr);
4188 // At this point, memory shouldn't be alive...
4189 EXPECT_FALSE(ref_count->IsAlive());
4190 // ...and we should report the ptr as dangling.
4191 ref_count->ReportIfDangling();
4192 EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 1);
4193 EXPECT_TRUE(ref_count->Release());
4194
4195 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4196 }
4197
4198 #if !BUILDFLAG(HAS_64_BIT_POINTERS)
4199 TEST_P(PartitionAllocTest, BackupRefPtrGuardRegion) {
4200 if (!UseBRPPool()) {
4201 return;
4202 }
4203
4204 size_t alignment = internal::PageAllocationGranularity();
4205
4206 uintptr_t requested_address;
4207 memset(&requested_address, internal::kQuarantinedByte,
4208 sizeof(requested_address));
4209 requested_address = RoundDownToPageAllocationGranularity(requested_address);
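  // |requested_address| now consists entirely of the quarantine zap pattern.
  // That region should already be reserved as a guard, so a fresh mapping is
  // not expected to land exactly there (hence the EXPECT_NE below).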
4210
4211 uintptr_t allocated_address =
4212 AllocPages(requested_address, alignment, alignment,
4213 PageAccessibilityConfiguration(
4214 PageAccessibilityConfiguration::kReadWrite),
4215 PageTag::kPartitionAlloc);
4216 EXPECT_NE(allocated_address, requested_address);
4217
4218 if (allocated_address) {
4219 FreePages(allocated_address, alignment);
4220 }
4221 }
4222 #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
4223 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
4224
4225 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
4226
4227 // Allocate memory, and reference it from 3 raw_ptr. Among them 2 will be
4228 // dangling.
4229 TEST_P(PartitionAllocTest, DanglingPtr) {
4230 if (!UseBRPPool()) {
4231 return;
4232 }
4233
4234 CountDanglingRawPtr dangling_checks;
4235
4236 // Allocate memory, and reference it from 3 raw_ptr.
4237 uint64_t* ptr = static_cast<uint64_t*>(
4238 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4239 auto* ref_count =
4240 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4241 ref_count->Acquire();
4242 ref_count->Acquire();
4243 ref_count->Acquire();
4244 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4245 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4246
4247 // The first raw_ptr stops referencing it, before the memory has been
4248 // released.
4249 EXPECT_FALSE(ref_count->Release());
4250 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4251 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4252
4253 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
4254   // Free it. This creates two dangling pointers.
4255 allocator.root()->Free(ptr);
4256 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4257 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4258
4259   // The dangling raw_ptr stops referencing it.
4260 EXPECT_FALSE(ref_count->Release());
4261 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4262 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4263
4264   // The dangling raw_ptr stops referencing it again.
4265 EXPECT_TRUE(ref_count->Release());
4266 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4267 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4268 #else
4269   // Free it. This creates two dangling pointers.
4270 allocator.root()->Free(ptr);
4271 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4272 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4273
4274   // The dangling raw_ptr stops referencing it.
4275 EXPECT_FALSE(ref_count->Release());
4276 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4277 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4278
4279   // The dangling raw_ptr stops referencing it again.
4280 EXPECT_TRUE(ref_count->Release());
4281 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4282 EXPECT_EQ(g_dangling_raw_ptr_released_count, 2);
4283 #endif
4284
4285 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4286 }
4287
4288 // Allocate memory, and reference it from 3
4289 // raw_ptr<T, DisableDanglingPtrDetection>. Among them 2 will be dangling. This
4290 // doesn't trigger any dangling raw_ptr checks.
4291 TEST_P(PartitionAllocTest, DanglingDanglingPtr) {
4292 if (!UseBRPPool()) {
4293 return;
4294 }
4295
4296 CountDanglingRawPtr dangling_checks;
4297
4298 // Allocate memory, and reference it from 3 raw_ptr.
4299 uint64_t* ptr = static_cast<uint64_t*>(
4300 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4301 auto* ref_count =
4302 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4303 ref_count->AcquireFromUnprotectedPtr();
4304 ref_count->AcquireFromUnprotectedPtr();
4305 ref_count->AcquireFromUnprotectedPtr();
4306 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4307 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4308
4309 // The first raw_ptr<T, DisableDanglingPtrDetection> stops referencing it,
4310 // before the memory has been released.
4311 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4312 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4313 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4314
4315 // Free it. This creates two dangling raw_ptr<T, DisableDanglingPtrDetection>.
4316 allocator.root()->Free(ptr);
4317 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4318 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4319
4320   // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4321 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4322 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4323 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4324
4325   // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it
4326 // again.
4327 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4328 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4329 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4330
4331 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4332 }
4333
4334 // When 'free' is called, there remain one raw_ptr<> and one
4335 // raw_ptr<T, DisableDanglingPtrDetection>. The raw_ptr<> is released first.
4336 TEST_P(PartitionAllocTest, DanglingMixedReleaseRawPtrFirst) {
4337 if (!UseBRPPool()) {
4338 return;
4339 }
4340
4341 CountDanglingRawPtr dangling_checks;
4342
4343 uint64_t* ptr = static_cast<uint64_t*>(
4344 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4345 auto* ref_count =
4346 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4347 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4348 ref_count->AcquireFromUnprotectedPtr();
4349 ref_count->Acquire();
4350 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4351 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4352
4353 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
4354 // Free it.
4355 allocator.root()->Free(ptr);
4356 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4357 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4358
4359 // The raw_ptr<> stops referencing it.
4360 EXPECT_FALSE(ref_count->Release());
4361 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4362 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4363
4364   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4365 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4366 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4367 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4368 #else
4369 // Free it.
4370 allocator.root()->Free(ptr);
4371 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4372 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4373
4374 // The raw_ptr<> stops referencing it.
4375 EXPECT_FALSE(ref_count->Release());
4376 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4377 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4378
4379   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4380 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4381 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4382 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4383 #endif
4384
4385 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4386 }
4387
4388 // When 'free' is called, there remain one raw_ptr<> and one
4389 // raw_ptr<T, DisableDanglingPtrDetection>.
4390 // The raw_ptr<T, DisableDanglingPtrDetection> is released first. This
4391 // triggers the dangling raw_ptr<> checks.
4392 TEST_P(PartitionAllocTest, DanglingMixedReleaseDanglingPtrFirst) {
4393 if (!UseBRPPool()) {
4394 return;
4395 }
4396
4397 CountDanglingRawPtr dangling_checks;
4398
4399 void* ptr =
4400 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4401 auto* ref_count =
4402 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4403 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4404 ref_count->AcquireFromUnprotectedPtr();
4405 ref_count->Acquire();
4406 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4407 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4408
4409 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
4410 // Free it.
4411 allocator.root()->Free(ptr);
4412 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4413 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4414
4415 // The raw_ptr<> stops referencing it.
4416 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4417 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4418 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4419
4420   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4421 EXPECT_TRUE(ref_count->Release());
4422 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4423 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4424 #else
4425 // Free it.
4426 allocator.root()->Free(ptr);
4427 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4428 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4429
4430 // The raw_ptr<> stops referencing it.
4431 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4432 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4433 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4434
4435   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4436 EXPECT_TRUE(ref_count->Release());
4437 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4438 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4439 #endif
4440
4441 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4442 }
4443
4444 // When 'free' is called, there remains one
4445 // raw_ptr<T, DisableDanglingPtrDetection>, which is then used to acquire one
4446 // dangling raw_ptr<>. Release the raw_ptr<> first.
4447 TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtr) {
4448 if (!UseBRPPool()) {
4449 return;
4450 }
4451
4452 CountDanglingRawPtr dangling_checks;
4453
4454 void* ptr =
4455 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4456 auto* ref_count =
4457 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4458 // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
4459 ref_count->AcquireFromUnprotectedPtr();
4460 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4461 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4462
4463 // Free it once.
4464 allocator.root()->Free(ptr);
4465 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4466 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4467
4468 // A raw_ptr<> starts referencing it.
4469 ref_count->Acquire();
4470 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4471 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4472
4473 // The raw_ptr<> stops referencing it.
4474 EXPECT_FALSE(ref_count->Release());
4475 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4476 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4477
4478 // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4479 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4480 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4481 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4482
4483 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4484 }
4485
4486 // Same as 'DanglingPtrUsedToAcquireNewRawPtr', but release the
4487 // raw_ptr<T, DisableDanglingPtrDetection> before the raw_ptr<>.
4488 TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtrVariant) {
4489 if (!UseBRPPool()) {
4490 return;
4491 }
4492
4493 CountDanglingRawPtr dangling_checks;
4494
4495 void* ptr =
4496 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4497 auto* ref_count =
4498 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4499 // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
4500 ref_count->AcquireFromUnprotectedPtr();
4501 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4502 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4503
4504 // Free it.
4505 allocator.root()->Free(ptr);
4506 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4507 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4508
4509 // A raw_ptr<> starts referencing it.
4510 ref_count->Acquire();
4511 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4512 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4513
4514 // The raw_ptr<> stops referencing it.
4515 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4516 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4517 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4518
4519 // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4520 EXPECT_TRUE(ref_count->Release());
4521 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4522 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4523
4524 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4525 }
4526
4527 // Acquire a raw_ptr<T>, and release it before freeing memory. In the
4528 // background, there is one raw_ptr<T, DisableDanglingPtrDetection>. This
4529 // doesn't trigger any dangling raw_ptr<T> checks.
4530 TEST_P(PartitionAllocTest, RawPtrReleasedBeforeFree) {
4531 if (!UseBRPPool()) {
4532 return;
4533 }
4534
4535 CountDanglingRawPtr dangling_checks;
4536
4537 void* ptr =
4538 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4539 auto* ref_count =
4540 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4541 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4542 ref_count->Acquire();
4543 ref_count->AcquireFromUnprotectedPtr();
4544 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4545 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4546
4547 // Release the raw_ptr<>.
4548 EXPECT_FALSE(ref_count->Release());
4549 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4550 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4551
4552 // Free it.
4553 allocator.root()->Free(ptr);
4554 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4555 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4556
4557   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4558 EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
4559 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4560 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4561
4562 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4563 }
4564
4565 #if defined(PA_HAS_DEATH_TESTS)
4566 // DCHECK messages are stripped in official builds. That causes death tests
4567 // with matchers to fail.
4568 #if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4569
4570 // Acquire() once, Release() twice => CRASH
4571 TEST_P(PartitionAllocDeathTest, ReleaseUnderflowRawPtr) {
4572 if (!UseBRPPool()) {
4573 return;
4574 }
4575
4576 void* ptr =
4577 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4578 auto* ref_count =
4579 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4580 ref_count->Acquire();
4581 EXPECT_FALSE(ref_count->Release());
4582 EXPECT_DCHECK_DEATH(ref_count->Release());
4583 allocator.root()->Free(ptr);
4584 }
4585
4586 // AcquireFromUnprotectedPtr() once, ReleaseFromUnprotectedPtr() twice => CRASH
4587 TEST_P(PartitionAllocDeathTest, ReleaseUnderflowDanglingPtr) {
4588 if (!UseBRPPool()) {
4589 return;
4590 }
4591
4592 void* ptr =
4593 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4594 auto* ref_count =
4595 PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
4596 ref_count->AcquireFromUnprotectedPtr();
4597 EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
4598 EXPECT_DCHECK_DEATH(ref_count->ReleaseFromUnprotectedPtr());
4599 allocator.root()->Free(ptr);
4600 }
4601
4602 #endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4603 #endif // defined(PA_HAS_DEATH_TESTS)
4604 #endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
4605
4606 TEST_P(PartitionAllocTest, ReservationOffset) {
4607 // For normal buckets, offset should be kOffsetTagNormalBuckets.
4608 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4609 EXPECT_TRUE(ptr);
4610 uintptr_t address = UntagPtr(ptr);
4611 EXPECT_EQ(kOffsetTagNormalBuckets, *ReservationOffsetPointer(address));
4612 allocator.root()->Free(ptr);
4613
4614   // For direct map, each super page records its offset within the reservation.
4615 size_t large_size = kSuperPageSize * 5 + PartitionPageSize() * .5f;
4616 ASSERT_GT(large_size, kMaxBucketed);
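  // The reservation spans 6 super pages (5 full ones plus a partial one), so
  // the offsets checked below run from 0 through 5.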
4617 ptr = allocator.root()->Alloc(large_size, type_name);
4618 EXPECT_TRUE(ptr);
4619 address = UntagPtr(ptr);
4620 EXPECT_EQ(0U, *ReservationOffsetPointer(address));
4621 EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
4622 EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
4623 EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
4624 EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
4625 EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
4626
4627 // In-place realloc doesn't affect the offsets.
4628 void* new_ptr = allocator.root()->Realloc(ptr, large_size * .8, type_name);
4629 EXPECT_EQ(new_ptr, ptr);
4630 EXPECT_EQ(0U, *ReservationOffsetPointer(address));
4631 EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
4632 EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
4633 EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
4634 EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
4635 EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
4636
4637 allocator.root()->Free(ptr);
4638 // After free, the offsets must be kOffsetTagNotAllocated.
4639 EXPECT_EQ(kOffsetTagNotAllocated, *ReservationOffsetPointer(address));
4640 EXPECT_EQ(kOffsetTagNotAllocated,
4641 *ReservationOffsetPointer(address + kSuperPageSize));
4642 EXPECT_EQ(kOffsetTagNotAllocated,
4643 *ReservationOffsetPointer(address + kSuperPageSize * 2));
4644 EXPECT_EQ(kOffsetTagNotAllocated,
4645 *ReservationOffsetPointer(address + kSuperPageSize * 3));
4646 EXPECT_EQ(kOffsetTagNotAllocated,
4647 *ReservationOffsetPointer(address + kSuperPageSize * 4));
4648 EXPECT_EQ(kOffsetTagNotAllocated,
4649 *ReservationOffsetPointer(address + kSuperPageSize * 5));
4650 }
4651
4652 TEST_P(PartitionAllocTest, GetReservationStart) {
4653 size_t large_size = kSuperPageSize * 3 + PartitionPageSize() * .5f;
4654 ASSERT_GT(large_size, kMaxBucketed);
4655 void* ptr = allocator.root()->Alloc(large_size, type_name);
4656 EXPECT_TRUE(ptr);
4657 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
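  // The first partition page of a direct-map reservation is taken up by
  // metadata and guard pages, so the slot starts one partition page in.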
4658 uintptr_t reservation_start = slot_start - PartitionPageSize();
4659 EXPECT_EQ(0U, reservation_start & DirectMapAllocationGranularityOffsetMask());
4660
4661 uintptr_t address = UntagPtr(ptr);
4662 for (uintptr_t a = address; a < address + large_size; ++a) {
4663 uintptr_t address2 = GetDirectMapReservationStart(a) + PartitionPageSize();
4664 EXPECT_EQ(slot_start, address2);
4665 }
4666
4667 EXPECT_EQ(reservation_start, GetDirectMapReservationStart(slot_start));
4668
4669 allocator.root()->Free(ptr);
4670 }
4671
4672 TEST_P(PartitionAllocTest, CheckReservationType) {
4673 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4674 EXPECT_TRUE(ptr);
4675 uintptr_t address = UntagPtr(ptr);
4676 uintptr_t address_to_check = address;
4677 EXPECT_FALSE(IsReservationStart(address_to_check));
4678 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4679 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4680 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4681 address_to_check = address + kTestAllocSize - 1;
4682 EXPECT_FALSE(IsReservationStart(address_to_check));
4683 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4684 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4685 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4686 address_to_check =
4687 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4688 EXPECT_TRUE(IsReservationStart(address_to_check));
4689 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4690 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4691 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4692 allocator.root()->Free(ptr);
4693 // Freeing keeps a normal-bucket super page in memory.
4694 address_to_check =
4695 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4696 EXPECT_TRUE(IsReservationStart(address_to_check));
4697 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4698 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4699 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4700
4701 size_t large_size = 2 * kSuperPageSize;
4702 ASSERT_GT(large_size, kMaxBucketed);
4703 ptr = allocator.root()->Alloc(large_size, type_name);
4704 EXPECT_TRUE(ptr);
4705 address = UntagPtr(ptr);
4706 address_to_check = address;
4707 EXPECT_FALSE(IsReservationStart(address_to_check));
4708 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4709 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4710 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4711 address_to_check =
4712 partition_alloc::internal::base::bits::AlignUp(address, kSuperPageSize);
4713 EXPECT_FALSE(IsReservationStart(address_to_check));
4714 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4715 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4716 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4717 address_to_check = address + large_size - 1;
4718 EXPECT_FALSE(IsReservationStart(address_to_check));
4719 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4720 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4721 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4722 address_to_check =
4723 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4724 EXPECT_TRUE(IsReservationStart(address_to_check));
4725 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4726 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4727 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4728 allocator.root()->Free(ptr);
4729 // Freeing releases direct-map super pages.
4730 address_to_check =
4731 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4732 #if BUILDFLAG(PA_DCHECK_IS_ON)
4733 // Expect to DCHECK on unallocated region.
4734 EXPECT_DEATH_IF_SUPPORTED(IsReservationStart(address_to_check), "");
4735 #endif
4736 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4737 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4738 EXPECT_FALSE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4739 }
4740
4741 // Test for crash http://crbug.com/1169003.
4742 TEST_P(PartitionAllocTest, CrossPartitionRootRealloc) {
4743   // The size is large enough to be satisfied from a single-slot slot span.
4744 size_t test_size = MaxRegularSlotSpanSize() - ExtraAllocSize(allocator);
4745 void* ptr = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
4746 test_size, nullptr);
4747 EXPECT_TRUE(ptr);
4748
4749 // Create new root and call PurgeMemory to simulate ConfigurePartitions().
4750 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
4751 PurgeFlags::kDiscardUnusedSystemPages);
4752 auto* new_root = new PartitionRoot<ThreadSafe>({
4753 PartitionOptions::AlignedAlloc::kDisallowed,
4754 PartitionOptions::ThreadCache::kDisabled,
4755 PartitionOptions::Quarantine::kDisallowed,
4756 PartitionOptions::Cookie::kAllowed,
4757 PartitionOptions::BackupRefPtr::kDisabled,
4758 PartitionOptions::BackupRefPtrZapping::kDisabled,
4759 PartitionOptions::UseConfigurablePool::kNo,
4760 });
4761 SetDistributionForPartitionRoot(new_root, GetBucketDistribution());
4762
4763 // Realloc from |allocator.root()| into |new_root|.
4764 void* ptr2 = new_root->ReallocWithFlags(AllocFlags::kReturnNull, ptr,
4765 test_size + 1024, nullptr);
4766 EXPECT_TRUE(ptr2);
4767 PA_EXPECT_PTR_NE(ptr, ptr2);
4768 }
4769
4770 TEST_P(PartitionAllocTest, FastPathOrReturnNull) {
4771 size_t allocation_size = 64;
4772 // The very first allocation is never a fast path one, since it needs a new
4773 // super page and a new partition page.
4774 EXPECT_FALSE(allocator.root()->AllocWithFlags(
4775 AllocFlags::kFastPathOrReturnNull, allocation_size, ""));
4776 void* ptr = allocator.root()->AllocWithFlags(0, allocation_size, "");
4777 ASSERT_TRUE(ptr);
4778
4779 // Next one is, since the partition page has been activated.
4780 void* ptr2 = allocator.root()->AllocWithFlags(
4781 AllocFlags::kFastPathOrReturnNull, allocation_size, "");
4782 EXPECT_TRUE(ptr2);
4783
4784 // First allocation of a different bucket is slow.
4785 EXPECT_FALSE(allocator.root()->AllocWithFlags(
4786 AllocFlags::kFastPathOrReturnNull, 2 * allocation_size, ""));
4787
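  // Fast-path allocations can only be served from the slot span that is
  // already provisioned, so the total served this way stays within one regular
  // slot span (checked below).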
4788 size_t allocated_size = 2 * allocation_size;
4789 std::vector<void*> ptrs;
4790 while (void* new_ptr = allocator.root()->AllocWithFlags(
4791 AllocFlags::kFastPathOrReturnNull, allocation_size, "")) {
4792 ptrs.push_back(new_ptr);
4793 allocated_size += allocation_size;
4794 }
4795 EXPECT_LE(allocated_size,
4796 PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan);
4797
4798 for (void* ptr_to_free : ptrs) {
4799 allocator.root()->FreeNoHooks(ptr_to_free);
4800 }
4801
4802 allocator.root()->FreeNoHooks(ptr);
4803 allocator.root()->FreeNoHooks(ptr2);
4804 }
4805
4806 #if defined(PA_HAS_DEATH_TESTS)
4807 // DCHECK messages are stripped in official builds. That causes death tests
4808 // with matchers to fail.
4809 #if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4810
4811 TEST_P(PartitionAllocDeathTest, CheckTriggered) {
4812 EXPECT_DCHECK_DEATH_WITH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
4813 EXPECT_DEATH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
4814 }
4815
4816 #endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4817 #endif // defined(PA_HAS_DEATH_TESTS)
4818
4819 // Not on chromecast, since gtest considers extra output from itself as a test
4820 // failure:
4821 // https://ci.chromium.org/ui/p/chromium/builders/ci/Cast%20Audio%20Linux/98492/overview
4822 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_HAS_DEATH_TESTS) && \
4823 !BUILDFLAG(PA_IS_CASTOS)
4824
4825 namespace {
4826
4827 PA_NOINLINE void FreeForTest(void* data) {
4828 free(data);
4829 }
4830
4831 class ThreadDelegateForPreforkHandler
4832 : public base::PlatformThreadForTesting::Delegate {
4833 public:
4834   ThreadDelegateForPreforkHandler(std::atomic<bool>& please_stop,
4835 std::atomic<int>& started_threads,
4836 const int alloc_size)
4837 : please_stop_(please_stop),
4838 started_threads_(started_threads),
4839 alloc_size_(alloc_size) {}
4840
4841   void ThreadMain() override {
4842 started_threads_++;
4843 while (!please_stop_.load(std::memory_order_relaxed)) {
4844 void* ptr = malloc(alloc_size_);
4845
4846 // A simple malloc() / free() pair can be discarded by the compiler (and
4847       // is), making the test fail. Making |FreeForTest()| a PA_NOINLINE
4848       // function is both necessary and sufficient to keep the call from
4849       // being eliminated.
4850 FreeForTest(ptr);
4851 }
4852 }
4853
4854 private:
4855 std::atomic<bool>& please_stop_;
4856 std::atomic<int>& started_threads_;
4857 const int alloc_size_;
4858 };
4859
4860 } // namespace
4861
4862 // Disabled because executing it causes Gtest to show a warning in the output,
4863 // which confuses the runner on some platforms, making the test report an
4864 // "UNKNOWN" status even though it succeeded.
4865 TEST_P(PartitionAllocTest, DISABLED_PreforkHandler) {
4866 std::atomic<bool> please_stop;
4867 std::atomic<int> started_threads{0};
4868
4869 // Continuously allocates / frees memory, bypassing the thread cache. This
4870   // makes it likely that one of these threads will own the lock, and that
4871   // the EXPECT_EXIT() part would deadlock without the prefork handler.
4872 constexpr size_t kAllocSize = ThreadCache::kLargeSizeThreshold + 1;
4873 ThreadDelegateForPreforkHandler delegate(please_stop, started_threads,
4874 kAllocSize);
4875
4876 constexpr int kThreads = 4;
4877 base::PlatformThreadHandle thread_handles[kThreads];
4878 for (auto& thread_handle : thread_handles) {
4879 base::PlatformThreadForTesting::Create(0, &delegate, &thread_handle);
4880 }
4881 // Make sure all threads are actually already running.
4882 while (started_threads != kThreads) {
4883 }
4884
4885 EXPECT_EXIT(
4886 {
4887 void* ptr = malloc(kAllocSize);
4888 FreeForTest(ptr);
4889 exit(1);
4890 },
4891 ::testing::ExitedWithCode(1), "");
4892
4893 please_stop.store(true);
4894 for (auto& thread_handle : thread_handles) {
4895 base::PlatformThreadForTesting::Join(thread_handle);
4896 }
4897 }
4898
4899 #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
4900         // defined(PA_HAS_DEATH_TESTS) && !BUILDFLAG(PA_IS_CASTOS)
4901
4902 // Checks the bucket index logic.
4903 TEST_P(PartitionAllocTest, GetIndex) {
4904 BucketIndexLookup lookup{};
4905
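  // Whatever bucket a size maps to must be at least as large as that size.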
4906 for (size_t size = 0; size < kMaxBucketed; size++) {
4907 size_t index = BucketIndexLookup::GetIndex(size);
4908 ASSERT_GE(lookup.bucket_sizes()[index], size);
4909 }
4910
4911   // Make sure that power-of-two sizes have exactly matching buckets.
4912 for (size_t size = (1 << (kMinBucketedOrder - 1)); size < kMaxBucketed;
4913 size <<= 1) {
4914 size_t index = BucketIndexLookup::GetIndex(size);
4915 ASSERT_EQ(lookup.bucket_sizes()[index], size);
4916 }
4917 }
4918
4919 // Used to check alignment. If the compiler understands the annotations, the
4920 // zeroing in the constructor uses aligned SIMD instructions.
4921 TEST_P(PartitionAllocTest, MallocFunctionAnnotations) {
4922 struct TestStruct {
4923 uint64_t a = 0;
4924 uint64_t b = 0;
4925 };
4926
4927 void* buffer = Alloc(sizeof(TestStruct));
4928 // Should use "mov*a*ps" on x86_64.
4929 auto* x = new (buffer) TestStruct();
4930
4931 EXPECT_EQ(x->a, 0u);
4932 Free(buffer);
4933 }
4934
4935 // Test that the ConfigurablePool works properly.
4936 TEST_P(PartitionAllocTest, ConfigurablePool) {
4937 EXPECT_FALSE(IsConfigurablePoolAvailable());
4938
4939 // The rest is only applicable to 64-bit mode
4940 #if defined(ARCH_CPU_64_BITS)
4941 // Repeat the test for every possible Pool size
4942 const size_t max_pool_size = PartitionAddressSpace::ConfigurablePoolMaxSize();
4943 const size_t min_pool_size = PartitionAddressSpace::ConfigurablePoolMinSize();
4944 for (size_t pool_size = max_pool_size; pool_size >= min_pool_size;
4945 pool_size /= 2) {
4946 PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(pool_size));
4947 EXPECT_FALSE(IsConfigurablePoolAvailable());
4948 uintptr_t pool_base =
4949 AllocPages(pool_size, pool_size,
4950 PageAccessibilityConfiguration(
4951 PageAccessibilityConfiguration::kInaccessible),
4952 PageTag::kPartitionAlloc);
4953 EXPECT_NE(0u, pool_base);
4954 PartitionAddressSpace::InitConfigurablePool(pool_base, pool_size);
4955
4956 EXPECT_TRUE(IsConfigurablePoolAvailable());
4957
4958 auto* root = new PartitionRoot<ThreadSafe>({
4959 PartitionOptions::AlignedAlloc::kDisallowed,
4960 PartitionOptions::ThreadCache::kDisabled,
4961 PartitionOptions::Quarantine::kDisallowed,
4962 PartitionOptions::Cookie::kAllowed,
4963 PartitionOptions::BackupRefPtr::kDisabled,
4964 PartitionOptions::BackupRefPtrZapping::kDisabled,
4965 PartitionOptions::UseConfigurablePool::kIfAvailable,
4966 });
4967 root->UncapEmptySlotSpanMemoryForTesting();
4968 SetDistributionForPartitionRoot(root, GetBucketDistribution());
4969
4970 const size_t count = 250;
4971 std::vector<void*> allocations(count, nullptr);
4972 for (size_t i = 0; i < count; ++i) {
4973 const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
4974 allocations[i] = root->Alloc(size, nullptr);
4975 EXPECT_NE(nullptr, allocations[i]);
4976 // We don't Untag allocations here because MTE is disabled for
4977 // configurable pools used by V8.
4978 // https://bugs.chromium.org/p/v8/issues/detail?id=13117
4979 uintptr_t allocation_base = reinterpret_cast<uintptr_t>(allocations[i]);
4980 EXPECT_EQ(allocation_base, UntagPtr(allocations[i]));
4981 EXPECT_TRUE(allocation_base >= pool_base &&
4982 allocation_base < pool_base + pool_size);
4983 }
4984
4985 PartitionAddressSpace::UninitConfigurablePoolForTesting();
4986 FreePages(pool_base, pool_size);
4987 }
4988
4989 #endif // defined(ARCH_CPU_64_BITS)
4990 }
4991
4992 TEST_P(PartitionAllocTest, EmptySlotSpanSizeIsCapped) {
4993 // Use another root, since the ones from the test harness disable the empty
4994 // slot span size cap.
4995 PartitionRoot<ThreadSafe> root;
4996 root.Init({
4997 PartitionOptions::AlignedAlloc::kDisallowed,
4998 PartitionOptions::ThreadCache::kDisabled,
4999 PartitionOptions::Quarantine::kDisallowed,
5000 PartitionOptions::Cookie::kAllowed,
5001 PartitionOptions::BackupRefPtr::kDisabled,
5002 PartitionOptions::BackupRefPtrZapping::kDisabled,
5003 PartitionOptions::UseConfigurablePool::kNo,
5004 });
5005 SetDistributionForPartitionRoot(&root, GetBucketDistribution());
5006
5007   // Allocate some memory and don't free it, to keep the memory committed.
5008 std::vector<void*> allocated_memory;
5009 const size_t size = SystemPageSize();
5010 const size_t count = 400;
5011 for (size_t i = 0; i < count; i++) {
5012 void* ptr = root.Alloc(size, "");
5013 allocated_memory.push_back(ptr);
5014 }
5015 ASSERT_GE(root.total_size_of_committed_pages.load(std::memory_order_relaxed),
5016 size * count);
5017
5018 // To create empty slot spans, allocate from single-slot slot spans, 128kiB at
5019 // a time.
5020 std::vector<void*> single_slot_allocated_memory;
5021 constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize - 1;
5022 const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
5023 // Make sure that even with allocation size rounding up, a single allocation
5024 // is still below the threshold.
5025 ASSERT_LT(MaxRegularSlotSpanSize() * 2,
5026 ((count * size) >> root.max_empty_slot_spans_dirty_bytes_shift));
5027 for (size_t i = 0; i < single_slot_count; i++) {
5028 void* ptr = root.Alloc(single_slot_size, "");
5029 single_slot_allocated_memory.push_back(ptr);
5030 }
5031
5032 // Free everything at once, creating as many empty slot spans as there are
5033 // allocations (since they are from single-slot slot spans).
5034 for (void* ptr : single_slot_allocated_memory) {
5035 root.Free(ptr);
5036 }
5037
5038 // Still have some committed empty slot spans.
5039 // PA_TS_UNCHECKED_READ() is not an issue here, since everything is
5040 // single-threaded.
5041 EXPECT_GT(PA_TS_UNCHECKED_READ(root.empty_slot_spans_dirty_bytes), 0u);
5042 // But not all, as the cap triggered.
5043 EXPECT_LT(PA_TS_UNCHECKED_READ(root.empty_slot_spans_dirty_bytes),
5044 single_slot_count * single_slot_size);
5045
5046 // Nothing left after explicit purge.
5047 root.PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
5048 EXPECT_EQ(PA_TS_UNCHECKED_READ(root.empty_slot_spans_dirty_bytes), 0u);
5049
5050 for (void* ptr : allocated_memory) {
5051 root.Free(ptr);
5052 }
5053 }
5054
5055 TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {
5056 PartitionRoot<ThreadSafe> root({
5057 PartitionOptions::AlignedAlloc::kDisallowed,
5058 PartitionOptions::ThreadCache::kDisabled,
5059 PartitionOptions::Quarantine::kDisallowed,
5060 PartitionOptions::Cookie::kAllowed,
5061 PartitionOptions::BackupRefPtr::kDisabled,
5062 PartitionOptions::BackupRefPtrZapping::kDisabled,
5063 PartitionOptions::UseConfigurablePool::kIfAvailable,
5064 });
5065 root.UncapEmptySlotSpanMemoryForTesting();
5066 SetDistributionForPartitionRoot(&root, GetBucketDistribution());
5067
5068 std::vector<void*> single_slot_allocated_memory;
5069 constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize + 10;
5070 const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
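  // |bucket_size| is the slot size these single-slot allocations land in; each
  // empty slot span accounts for that many dirty bytes in the checks below.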
5071 const size_t bucket_size =
5072 root.buckets[SizeToIndex(single_slot_size)].slot_size;
5073
5074 for (size_t i = 0; i < single_slot_count; i++) {
5075 void* ptr = root.Alloc(single_slot_size, "");
5076 single_slot_allocated_memory.push_back(ptr);
5077 }
5078
5079 // Free everything at once, creating as many empty slot spans as there are
5080 // allocations (since they are from single-slot slot spans).
5081 for (void* ptr : single_slot_allocated_memory) {
5082 root.Free(ptr);
5083 }
5084 single_slot_allocated_memory.clear();
5085
5086 // Some of the free()-s above overflowed the slot span ring.
5087 EXPECT_EQ(PA_TS_UNCHECKED_READ(root.empty_slot_spans_dirty_bytes),
5088 kDefaultEmptySlotSpanRingSize * bucket_size);
5089
5090   // Now we can cache more slot spans.
5091 root.EnableLargeEmptySlotSpanRing();
5092
5093 constexpr size_t single_slot_large_count = kDefaultEmptySlotSpanRingSize + 10;
5094 for (size_t i = 0; i < single_slot_large_count; i++) {
5095 void* ptr = root.Alloc(single_slot_size, "");
5096 single_slot_allocated_memory.push_back(ptr);
5097 }
5098
5099 for (void* ptr : single_slot_allocated_memory) {
5100 root.Free(ptr);
5101 }
5102 single_slot_allocated_memory.clear();
5103
5104 // No overflow this time.
5105 EXPECT_EQ(PA_TS_UNCHECKED_READ(root.empty_slot_spans_dirty_bytes),
5106 single_slot_large_count * bucket_size);
5107
5108 constexpr size_t single_slot_too_many_count = kMaxFreeableSpans + 10;
5109 for (size_t i = 0; i < single_slot_too_many_count; i++) {
5110 void* ptr = root.Alloc(single_slot_size, "");
5111 single_slot_allocated_memory.push_back(ptr);
5112 }
5113
5114 for (void* ptr : single_slot_allocated_memory) {
5115 root.Free(ptr);
5116 }
5117 single_slot_allocated_memory.clear();
5118
5119 // Overflow still works.
5120 EXPECT_EQ(PA_TS_UNCHECKED_READ(root.empty_slot_spans_dirty_bytes),
5121 kMaxFreeableSpans * bucket_size);
5122 }
5123
5124 #if BUILDFLAG(PA_IS_CAST_ANDROID) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
5125 extern "C" {
5126 void* __real_malloc(size_t);
5127 } // extern "C"
5128
5129 TEST_P(PartitionAllocTest, HandleMixedAllocations) {
5130 void* ptr = __real_malloc(12);
5131 // Should not crash, no test assertion.
5132 free(ptr);
5133 }
5134 #endif
5135
5136 TEST_P(PartitionAllocTest, SortFreelist) {
5137 const size_t count = 100;
5138 const size_t allocation_size = 1;
5139 void* first_ptr = allocator.root()->Alloc(allocation_size, "");
5140
5141 std::vector<void*> allocations;
5142 for (size_t i = 0; i < count; ++i) {
5143 allocations.push_back(allocator.root()->Alloc(allocation_size, ""));
5144 }
5145
5146 // Shuffle and free memory out of order.
5147 std::random_device rd;
5148 std::mt19937 generator(rd());
5149 std::shuffle(allocations.begin(), allocations.end(), generator);
5150
5151 // Keep one allocation alive (first_ptr), so that the SlotSpan is not fully
5152 // empty.
5153 for (void* ptr : allocations) {
5154 allocator.root()->Free(ptr);
5155 }
5156 allocations.clear();
5157
5158 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
5159
5160 size_t bucket_index =
5161 SizeToIndex(allocation_size + ExtraAllocSize(allocator));
5162 auto& bucket = allocator.root()->buckets[bucket_index];
5163 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5164
5165 // Can sort again.
5166 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
5167 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5168
5169 for (size_t i = 0; i < count; ++i) {
5170 allocations.push_back(allocator.root()->Alloc(allocation_size, ""));
5171 // Allocating keeps the freelist sorted.
5172 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5173 }
5174
5175 // Check that it is sorted.
5176 for (size_t i = 1; i < allocations.size(); i++) {
5177 EXPECT_LT(UntagPtr(allocations[i - 1]), UntagPtr(allocations[i]));
5178 }
5179
5180 for (void* ptr : allocations) {
5181 allocator.root()->Free(ptr);
5182 // Free()-ing memory destroys order. Not looking at the head of the active
5183     // list, as it is not necessarily the one |ptr| came from.
5184 auto* slot_span =
5185 SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
5186 EXPECT_FALSE(slot_span->freelist_is_sorted());
5187 }
5188
5189 allocator.root()->Free(first_ptr);
5190 }
5191
5192 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_LINUX) && \
5193 defined(ARCH_CPU_64_BITS)
5194 TEST_P(PartitionAllocTest, CrashOnUnknownPointer) {
5195 int not_a_heap_object = 42;
5196   EXPECT_DEATH(allocator.root()->Free(&not_a_heap_object), "");
5197 }
5198 #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
5199 // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_64_BITS)
5200
5201 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
5202
5203 // Adapted from crashpad tests.
5204 class ScopedOpenCLNoOpKernel {
5205 public:
5206   ScopedOpenCLNoOpKernel()
5207 : context_(nullptr),
5208 program_(nullptr),
5209 kernel_(nullptr),
5210 success_(false) {}
5211
5212 ScopedOpenCLNoOpKernel(const ScopedOpenCLNoOpKernel&) = delete;
5213 ScopedOpenCLNoOpKernel& operator=(const ScopedOpenCLNoOpKernel&) = delete;
5214
5215   ~ScopedOpenCLNoOpKernel() {
5216 if (kernel_) {
5217 cl_int rv = clReleaseKernel(kernel_);
5218 EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseKernel";
5219 }
5220
5221 if (program_) {
5222 cl_int rv = clReleaseProgram(program_);
5223 EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseProgram";
5224 }
5225
5226 if (context_) {
5227 cl_int rv = clReleaseContext(context_);
5228 EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseContext";
5229 }
5230 }
5231
5232   void SetUp() {
5233 cl_platform_id platform_id;
5234 cl_int rv = clGetPlatformIDs(1, &platform_id, nullptr);
5235 ASSERT_EQ(rv, CL_SUCCESS) << "clGetPlatformIDs";
5236 cl_device_id device_id;
5237 rv =
5238 clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, nullptr);
5239 #if defined(ARCH_CPU_ARM64)
5240 // CL_DEVICE_TYPE_CPU doesn’t seem to work at all on arm64, meaning that
5241 // these weird OpenCL modules probably don’t show up there at all. Keep this
5242 // test even on arm64 in case this ever does start working.
5243 if (rv == CL_INVALID_VALUE) {
5244 return;
5245 }
5246 #endif // ARCH_CPU_ARM64
5247 ASSERT_EQ(rv, CL_SUCCESS) << "clGetDeviceIDs";
5248
5249 context_ = clCreateContext(nullptr, 1, &device_id, nullptr, nullptr, &rv);
5250 ASSERT_EQ(rv, CL_SUCCESS) << "clCreateContext";
5251
5252 const char* sources[] = {
5253 "__kernel void NoOp(void) {barrier(CLK_LOCAL_MEM_FENCE);}",
5254 };
5255 const size_t source_lengths[] = {
5256 strlen(sources[0]),
5257 };
5258 static_assert(std::size(sources) == std::size(source_lengths),
5259 "arrays must be parallel");
5260
5261 program_ = clCreateProgramWithSource(context_, std::size(sources), sources,
5262 source_lengths, &rv);
5263 ASSERT_EQ(rv, CL_SUCCESS) << "clCreateProgramWithSource";
5264
5265 rv = clBuildProgram(program_, 1, &device_id, "-cl-opt-disable", nullptr,
5266 nullptr);
5267 ASSERT_EQ(rv, CL_SUCCESS) << "clBuildProgram";
5268
5269 kernel_ = clCreateKernel(program_, "NoOp", &rv);
5270 ASSERT_EQ(rv, CL_SUCCESS) << "clCreateKernel";
5271
5272 success_ = true;
5273 }
5274
5275   bool success() const { return success_; }
5276
5277 private:
5278 cl_context context_;
5279 cl_program program_;
5280 cl_kernel kernel_;
5281 bool success_;
5282 };
5283
5284 // On macOS 10.11, allocations are made with PartitionAlloc, but the pointer
5285 // is incorrectly passed by CoreFoundation to the previous default zone,
5286 // causing crashes. This is intended to detect these issues regressing in future
5287 // versions of macOS.
5288 TEST_P(PartitionAllocTest, OpenCL) {
5289 ScopedOpenCLNoOpKernel kernel;
5290 kernel.SetUp();
5291 #if !defined(ARCH_CPU_ARM64)
5292 ASSERT_TRUE(kernel.success());
5293 #endif
5294 }
5295
5296 #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
5297 // BUILDFLAG(IS_MAC)
5298
5299 TEST_P(PartitionAllocTest, SmallSlotSpanWaste) {
5300 for (PartitionRoot<ThreadSafe>::Bucket& bucket : allocator.root()->buckets) {
5301 const size_t slot_size = bucket.slot_size;
5302 if (slot_size == kInvalidBucketSize) {
5303 continue;
5304 }
5305
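    // |small_waste| is the space left at the end of the slot span that cannot
    // hold another slot; it must stay below 5% of a system page.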
5306 size_t small_system_page_count =
5307 partition_alloc::internal::ComputeSystemPagesPerSlotSpan(
5308 bucket.slot_size, true);
5309 size_t small_waste =
5310 (small_system_page_count * SystemPageSize()) % slot_size;
5311
5312 EXPECT_LT(small_waste, .05 * SystemPageSize());
5313 if (slot_size <= MaxRegularSlotSpanSize()) {
5314 EXPECT_LE(small_system_page_count, MaxSystemPagesPerRegularSlotSpan());
5315 }
5316 }
5317 }
5318
5319 TEST_P(PartitionAllocTest, SortActiveSlotSpans) {
5320 auto run_test = [](size_t count) {
5321 PartitionBucket<ThreadSafe> bucket;
5322 bucket.Init(16);
5323 bucket.active_slot_spans_head = nullptr;
5324
5325 std::vector<SlotSpanMetadata<ThreadSafe>> slot_spans;
5326 slot_spans.reserve(count);
5327
5328 // Add slot spans with random freelist length.
5329 for (size_t i = 0; i < count; i++) {
5330 slot_spans.emplace_back(&bucket);
5331 auto& slot_span = slot_spans.back();
5332 slot_span.num_unprovisioned_slots =
5333 partition_alloc::internal::base::RandGenerator(
5334 bucket.get_slots_per_span() / 2);
5335 slot_span.num_allocated_slots =
5336 partition_alloc::internal::base::RandGenerator(
5337 bucket.get_slots_per_span() - slot_span.num_unprovisioned_slots);
5338 slot_span.next_slot_span = bucket.active_slot_spans_head;
5339 bucket.active_slot_spans_head = &slot_span;
5340 }
5341
5342 bucket.SortActiveSlotSpans();
5343
5344 std::set<SlotSpanMetadata<ThreadSafe>*> seen_slot_spans;
5345 std::vector<SlotSpanMetadata<ThreadSafe>*> sorted_slot_spans;
5346 for (auto* slot_span = bucket.active_slot_spans_head; slot_span;
5347 slot_span = slot_span->next_slot_span) {
5348 sorted_slot_spans.push_back(slot_span);
5349 seen_slot_spans.insert(slot_span);
5350 }
5351
5352 // None repeated, none missing.
5353 EXPECT_EQ(seen_slot_spans.size(), sorted_slot_spans.size());
5354 EXPECT_EQ(seen_slot_spans.size(), slot_spans.size());
5355
5356 // The first slot spans are sorted.
5357 size_t sorted_spans_count =
5358 std::min(PartitionBucket<ThreadSafe>::kMaxSlotSpansToSort, count);
5359 EXPECT_TRUE(std::is_sorted(sorted_slot_spans.begin(),
5360 sorted_slot_spans.begin() + sorted_spans_count,
5361 partition_alloc::internal::CompareSlotSpans));
5362
5363 // Slot spans with no freelist entries are at the end of the sorted run.
5364 auto has_empty_freelist = [](SlotSpanMetadata<ThreadSafe>* a) {
5365 return a->GetFreelistLength() == 0;
5366 };
5367 auto it = std::find_if(sorted_slot_spans.begin(),
5368 sorted_slot_spans.begin() + sorted_spans_count,
5369 has_empty_freelist);
5370 if (it != sorted_slot_spans.end()) {
5371 EXPECT_TRUE(std::all_of(it,
5372 sorted_slot_spans.begin() + sorted_spans_count,
5373 has_empty_freelist));
5374 }
5375 };
5376
5377 // Everything is sorted.
5378 run_test(PartitionBucket<ThreadSafe>::kMaxSlotSpansToSort / 2);
5379 // Only the first slot spans are sorted.
5380 run_test(PartitionBucket<ThreadSafe>::kMaxSlotSpansToSort * 2);
5381
5382 // Corner cases.
5383 run_test(0);
5384 run_test(1);
5385 }
5386
5387 #if BUILDFLAG(USE_FREESLOT_BITMAP)
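// The free slot bitmap records, per slot, whether the slot is currently in
// use. The tests below check that the bit is set on Alloc(), cleared on
// Free(), and rewritten back to "used" when the slot span is decommitted or
// purged.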
TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsUsedAfterAlloc) {
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr);
}

TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsFreeAfterFree) {
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr);
  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
}

TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterDecommit) {
  void* ptr1 = allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr1);
  allocator.root()->Free(ptr1);

  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
  // Decommit the slot span. Bitmap will be rewritten in Decommit().
  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
}

TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterPurge) {
  void* ptr1 = allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name);
  char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name));
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr2);
  allocator.root()->Free(ptr2);

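  // ptr2's slot is now free, but ptr1 keeps the slot span active, so the
  // purge below discards the unused system page containing ptr2's slot.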
  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
  // Bitmap will be rewritten in PartitionPurgeSlotSpan().
  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr1);
}

#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)

}  // namespace partition_alloc::internal

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)