1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/persistent_memory_allocator.h"
6
7 #include <memory>
8
9 #include "base/files/file.h"
10 #include "base/files/file_util.h"
11 #include "base/files/memory_mapped_file.h"
12 #include "base/files/scoped_temp_dir.h"
13 #include "base/memory/raw_ptr.h"
14 #include "base/memory/read_only_shared_memory_region.h"
15 #include "base/memory/shared_memory_mapping.h"
16 #include "base/memory/writable_shared_memory_region.h"
17 #include "base/metrics/histogram.h"
18 #include "base/rand_util.h"
19 #include "base/strings/safe_sprintf.h"
20 #include "base/strings/stringprintf.h"
21 #include "base/synchronization/condition_variable.h"
22 #include "base/synchronization/lock.h"
23 #include "base/threading/simple_thread.h"
24 #include "build/build_config.h"
25 #include "testing/gmock/include/gmock/gmock.h"
26
27 namespace base {
28
29 namespace {
30
// Sizes and identifiers used to configure the test allocators below.
// `constexpr` (rather than `const`) guarantees compile-time initialization.
constexpr uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
constexpr uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
constexpr uint32_t TEST_ID = 12345;
constexpr char TEST_NAME[] = "TestAllocator";
35
SetFileLength(const base::FilePath & path,size_t length)36 void SetFileLength(const base::FilePath& path, size_t length) {
37 {
38 File file(path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE);
39 DCHECK(file.IsValid());
40 ASSERT_TRUE(file.SetLength(static_cast<int64_t>(length)));
41 }
42
43 int64_t actual_length;
44 DCHECK(GetFileSize(path, &actual_length));
45 DCHECK_EQ(length, static_cast<size_t>(actual_length));
46 }
47
48 } // namespace
49
50 typedef PersistentMemoryAllocator::Reference Reference;
51
52 class PersistentMemoryAllocatorTest : public testing::Test {
53 public:
54 // This can't be statically initialized because it's value isn't defined
55 // in the PersistentMemoryAllocator header file. Instead, it's simply set
56 // in the constructor.
57 uint32_t kAllocAlignment;
58
59 struct TestObject1 {
60 static constexpr uint32_t kPersistentTypeId = 1;
61 static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
62 int32_t onething;
63 char oranother;
64 };
65
66 struct TestObject2 {
67 static constexpr uint32_t kPersistentTypeId = 2;
68 static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
69 int64_t thiis;
70 int32_t that;
71 float andthe;
72 double other;
73 char thing[8];
74 };
75
PersistentMemoryAllocatorTest()76 PersistentMemoryAllocatorTest() {
77 kAllocAlignment = GetAllocAlignment();
78 mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
79 }
80
SetUp()81 void SetUp() override {
82 allocator_.reset();
83 ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
84 allocator_ = std::make_unique<PersistentMemoryAllocator>(
85 mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, TEST_ID,
86 TEST_NAME, PersistentMemoryAllocator::kReadWrite);
87 }
88
TearDown()89 void TearDown() override {
90 allocator_.reset();
91 }
92
CountIterables()93 unsigned CountIterables() {
94 PersistentMemoryAllocator::Iterator iter(allocator_.get());
95 uint32_t type;
96 unsigned count = 0;
97 while (iter.GetNext(&type) != 0) {
98 ++count;
99 }
100 return count;
101 }
102
GetAllocAlignment()103 static uint32_t GetAllocAlignment() {
104 return PersistentMemoryAllocator::kAllocAlignment;
105 }
106
107 protected:
108 std::unique_ptr<char[]> mem_segment_;
109 std::unique_ptr<PersistentMemoryAllocator> allocator_;
110 };
111
TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
  allocator_->CreateTrackingHistograms(allocator_->Name());

  std::string base_name(TEST_NAME);
  EXPECT_EQ(TEST_ID, allocator_->Id());
  EXPECT_TRUE(allocator_->used_histogram_);
  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
            allocator_->used_histogram_->histogram_name());
  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
            allocator_->GetMemoryState());

  // Record the initial memory state for comparison after allocations.
  PersistentMemoryAllocator::MemoryInfo meminfo_init;
  allocator_->GetMemoryInfo(&meminfo_init);
  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo_init.total);
  EXPECT_GT(meminfo_init.total, meminfo_init.free);

  // Allocate a first test object and check that it can be referenced and
  // that all of its metadata looks correct.
  TestObject1* obj1 = allocator_->New<TestObject1>();
  ASSERT_TRUE(obj1);
  Reference block1 = allocator_->GetAsReference(obj1);
  ASSERT_NE(0U, block1);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
            allocator_->GetAllocSize(block1));
  PersistentMemoryAllocator::MemoryInfo meminfo_after1;
  allocator_->GetMemoryInfo(&meminfo_after1);
  EXPECT_EQ(meminfo_init.total, meminfo_after1.total);
  EXPECT_GT(meminfo_init.free, meminfo_after1.free);

  // Pointers can be converted back into references; addresses outside the
  // allocation (or outside the segment entirely) yield the null reference.
  char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
  ASSERT_TRUE(memory1);
  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));

  // The first object becomes visible to iteration once made iterable.
  PersistentMemoryAllocator::Iterator fwd_iter(allocator_.get());
  EXPECT_EQ(0U, fwd_iter.GetLast());
  uint32_t type;
  EXPECT_EQ(0U, fwd_iter.GetNext(&type));
  allocator_->MakeIterable(block1);
  EXPECT_EQ(block1, fwd_iter.GetNext(&type));
  EXPECT_EQ(1U, type);
  EXPECT_EQ(block1, fwd_iter.GetLast());
  EXPECT_EQ(0U, fwd_iter.GetNext(&type));
  EXPECT_EQ(block1, fwd_iter.GetLast());

  // A second object of a different type must not be confused with the first.
  TestObject2* obj2 = allocator_->New<TestObject2>();
  ASSERT_TRUE(obj2);
  Reference block2 = allocator_->GetAsReference(obj2);
  ASSERT_NE(0U, block2);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
            allocator_->GetAllocSize(block2));
  PersistentMemoryAllocator::MemoryInfo meminfo_after2;
  allocator_->GetMemoryInfo(&meminfo_after2);
  EXPECT_EQ(meminfo_after1.total, meminfo_after2.total);
  EXPECT_GT(meminfo_after1.free, meminfo_after2.free);

  // The second object can also be made iterable, this time via its pointer.
  allocator_->MakeIterable(obj2);
  EXPECT_EQ(block2, fwd_iter.GetNext(&type));
  EXPECT_EQ(2U, type);
  EXPECT_EQ(block2, fwd_iter.GetLast());
  EXPECT_EQ(0U, fwd_iter.GetNext(&type));
  EXPECT_EQ(block2, fwd_iter.GetLast());

  // Resetting the iterator restarts it from the beginning.
  fwd_iter.Reset();
  EXPECT_EQ(0U, fwd_iter.GetLast());
  EXPECT_EQ(block1, fwd_iter.GetNext(&type));
  EXPECT_EQ(block1, fwd_iter.GetLast());
  EXPECT_EQ(block2, fwd_iter.GetNext(&type));
  EXPECT_EQ(block2, fwd_iter.GetLast());
  EXPECT_EQ(0U, fwd_iter.GetNext(&type));

  // The iterator can also be reset to an arbitrary reference.
  fwd_iter.Reset(block1);
  EXPECT_EQ(block1, fwd_iter.GetLast());
  EXPECT_EQ(block2, fwd_iter.GetNext(&type));
  EXPECT_EQ(block2, fwd_iter.GetLast());
  EXPECT_EQ(0U, fwd_iter.GetNext(&type));

  // Iteration may also begin after an arbitrary starting point.
  PersistentMemoryAllocator::Iterator mid_iter(allocator_.get(), block1);
  EXPECT_EQ(block2, mid_iter.GetNext(&type));
  EXPECT_EQ(0U, mid_iter.GetNext(&type));

  // Nothing should have gone noticeably wrong so far.
  EXPECT_FALSE(allocator_->IsFull());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // The internal histogram should have recorded the memory usage.
  allocator_->UpdateTrackingHistograms();
  std::unique_ptr<HistogramSamples> used_samples(
      allocator_->used_histogram_->SnapshotSamples());
  EXPECT_TRUE(used_samples);
  EXPECT_EQ(1, used_samples->TotalCount());

  // An object's type can be changed, both directly and via New().
  EXPECT_EQ(2U, allocator_->GetType(block2));
  allocator_->ChangeType(block2, 3, 2, false);
  EXPECT_EQ(3U, allocator_->GetType(block2));
  allocator_->New<TestObject2>(block2, 3, false);
  EXPECT_EQ(2U, allocator_->GetType(block2));

  // A second (read/write) allocator over the same memory segment must report
  // the id stored in the segment, not the one passed to its constructor.
  std::unique_ptr<PersistentMemoryAllocator> allocator2(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "",
                                    PersistentMemoryAllocator::kReadWrite));
  EXPECT_EQ(TEST_ID, allocator2->Id());
  EXPECT_FALSE(allocator2->used_histogram_);

  // Iteration and object access through the second allocator work too.
  PersistentMemoryAllocator::Iterator iter_rw(allocator2.get());
  EXPECT_EQ(block1, iter_rw.GetNext(&type));
  EXPECT_EQ(block2, iter_rw.GetNext(&type));
  EXPECT_EQ(0U, iter_rw.GetNext(&type));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));

  // Likewise for a third, read-only, allocator over the same segment.
  std::unique_ptr<const PersistentMemoryAllocator> allocator3(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "",
                                    PersistentMemoryAllocator::kReadOnly));
  EXPECT_EQ(TEST_ID, allocator3->Id());
  EXPECT_FALSE(allocator3->used_histogram_);

  PersistentMemoryAllocator::Iterator iter_ro(allocator3.get());
  EXPECT_EQ(block1, iter_ro.GetNext(&type));
  EXPECT_EQ(block2, iter_ro.GetNext(&type));
  EXPECT_EQ(0U, iter_ro.GetNext(&type));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));

  // GetNextOfType stops only at objects of the requested type.
  PersistentMemoryAllocator::Iterator type_iter(allocator_.get());
  EXPECT_EQ(block2, type_iter.GetNextOfType<TestObject2>());
  EXPECT_EQ(0U, type_iter.GetNextOfType(2));

  // GetNextOfObject returns typed pointers instead of references.
  PersistentMemoryAllocator::Iterator obj_iter(allocator_.get());
  EXPECT_EQ(obj2, obj_iter.GetNextOfObject<TestObject2>());
  EXPECT_EQ(nullptr, obj_iter.GetNextOfObject<TestObject2>());

  // Deleting an object removes it from typed iteration.
  allocator_->Delete(obj2);
  PersistentMemoryAllocator::Iterator del_iter(allocator_.get());
  EXPECT_EQ(nullptr, del_iter.GetNextOfObject<TestObject2>());

  // The memory state can be changed explicitly.
  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
            allocator_->GetMemoryState());
}
284
TEST_F(PersistentMemoryAllocatorTest, PageTest) {
  // A half-page allocation lands somewhere inside the first memory page.
  Reference small_ref = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
  EXPECT_LT(0U, small_ref);
  EXPECT_GT(TEST_MEMORY_PAGE, small_ref);

  // An allocation too big for the remainder of page one must start exactly
  // at the beginning of the second page.
  Reference large_ref =
      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
  EXPECT_EQ(TEST_MEMORY_PAGE, large_ref);

  // The next allocation doesn't fit in what's left of page two either, so
  // it begins the third page.
  Reference third_ref = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
  EXPECT_EQ(2U * TEST_MEMORY_PAGE, third_ref);
}
300
301 // A simple thread that takes an allocator and repeatedly allocates random-
302 // sized chunks from it until no more can be done.
303 class AllocatorThread : public SimpleThread {
304 public:
AllocatorThread(const std::string & name,void * base,uint32_t size,uint32_t page_size)305 AllocatorThread(const std::string& name,
306 void* base,
307 uint32_t size,
308 uint32_t page_size)
309 : SimpleThread(name, Options()),
310 count_(0),
311 iterable_(0),
312 allocator_(base,
313 size,
314 page_size,
315 0,
316 "",
317 PersistentMemoryAllocator::kReadWrite) {}
318
Run()319 void Run() override {
320 for (;;) {
321 uint32_t size = RandInt(1, 99);
322 uint32_t type = RandInt(100, 999);
323 Reference block = allocator_.Allocate(size, type);
324 if (!block)
325 break;
326
327 count_++;
328 if (RandInt(0, 1)) {
329 allocator_.MakeIterable(block);
330 iterable_++;
331 }
332 }
333 }
334
iterable()335 unsigned iterable() { return iterable_; }
count()336 unsigned count() { return count_; }
337
338 private:
339 unsigned count_;
340 unsigned iterable_;
341 PersistentMemoryAllocator allocator_;
342 };
343
344 // Test parallel allocation/iteration and ensure consistency across all
345 // instances.
TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
  void* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // The number of iterable objects must never decrease while the workers
  // run, since MakeIterable only ever appends.
  unsigned last_count = 0;
  do {
    unsigned count = CountIterables();
    EXPECT_LE(last_count, count);
    // Remember this observation for the next pass. Without this update the
    // comparison above was always against zero and thus vacuous.
    last_count = count;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  // Once all workers are done, the total number of iterable objects must
  // match exactly what the threads claim to have published.
  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(CountIterables(),
            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
                t5.iterable());
}
378
379 // A simple thread that counts objects by iterating through an allocator.
380 class CounterThread : public SimpleThread {
381 public:
CounterThread(const std::string & name,PersistentMemoryAllocator::Iterator * iterator,Lock * lock,ConditionVariable * condition,bool * wake_up)382 CounterThread(const std::string& name,
383 PersistentMemoryAllocator::Iterator* iterator,
384 Lock* lock,
385 ConditionVariable* condition,
386 bool* wake_up)
387 : SimpleThread(name, Options()),
388 iterator_(iterator),
389 lock_(lock),
390 condition_(condition),
391 count_(0),
392 wake_up_(wake_up) {}
393
394 CounterThread(const CounterThread&) = delete;
395 CounterThread& operator=(const CounterThread&) = delete;
396
Run()397 void Run() override {
398 // Wait so all threads can start at approximately the same time.
399 // Best performance comes from releasing a single worker which then
400 // releases the next, etc., etc.
401 {
402 AutoLock autolock(*lock_);
403
404 // Before calling Wait(), make sure that the wake up condition
405 // has not already passed. Also, since spurious signal events
406 // are possible, check the condition in a while loop to make
407 // sure that the wake up condition is met when this thread
408 // returns from the Wait().
409 // See usage comments in src/base/synchronization/condition_variable.h.
410 while (!*wake_up_) {
411 condition_->Wait();
412 condition_->Signal();
413 }
414 }
415
416 uint32_t type;
417 while (iterator_->GetNext(&type) != 0) {
418 ++count_;
419 }
420 }
421
count()422 unsigned count() { return count_; }
423
424 private:
425 raw_ptr<PersistentMemoryAllocator::Iterator> iterator_;
426 raw_ptr<Lock> lock_;
427 raw_ptr<ConditionVariable> condition_;
428 unsigned count_;
429 raw_ptr<bool> wake_up_;
430 };
431
432 // Ensure that parallel iteration returns the same number of objects as
433 // single-threaded iteration.
TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
  // Fill the entire memory segment with randomly-sized iterable allocations.
  unsigned iterable_count = 0;
  for (;;) {
    uint32_t size = RandInt(1, 99);
    uint32_t type = RandInt(100, 999);
    Reference block = allocator_->Allocate(size, type);
    if (!block)
      break;
    allocator_->MakeIterable(block);
    ++iterable_count;
  }
  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(iterable_count, CountIterables());

  // One iterator shared by all of the counting threads.
  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  Lock lock;
  ConditionVariable condition(&lock);
  bool wake_up = false;

  CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
  CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
  CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
  CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
  CounterThread t5("t5", &iter, &lock, &condition, &wake_up);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // Flip the wake-up flag under the lock. This avoids the race where
  // Signal() fires before every thread has reached its Wait() and some
  // thread would otherwise sleep forever.
  {
    AutoLock autolock(lock);
    wake_up = true;
  }

  // Release one waiter; the threads daisy-chain the remaining wake-ups.
  condition.Signal();

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  // Between them, the threads must have consumed every iterable object
  // exactly once.
  EXPECT_EQ(iterable_count,
            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());

#if 0
  // These ensure that the threads don't run sequentially. It shouldn't be
  // enabled in general because it could lead to a flaky test if it happens
  // simply by chance but it is useful during development to ensure that the
  // test is working correctly.
  EXPECT_NE(iterable_count, t1.count());
  EXPECT_NE(iterable_count, t2.count());
  EXPECT_NE(iterable_count, t3.count());
  EXPECT_NE(iterable_count, t4.count());
  EXPECT_NE(iterable_count, t5.count());
#endif
}
499
TEST_F(PersistentMemoryAllocatorTest, DelayedAllocationTest) {
  std::atomic<Reference> ref1, ref2;
  ref1.store(0, std::memory_order_relaxed);
  ref2.store(0, std::memory_order_relaxed);
  DelayedPersistentAllocation da1(allocator_.get(), &ref1, 1001, 100);
  DelayedPersistentAllocation da2a(allocator_.get(), &ref2, 2002, 200, 0);
  DelayedPersistentAllocation da2b(allocator_.get(), &ref2, 2002, 200, 5);

  // Constructing the delayed allocations must not allocate anything yet.
  uint32_t type;
  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  EXPECT_EQ(0U, iter.GetNext(&type));

  // The first Get() performs the real allocation and publishes its
  // reference into |ref1|.
  EXPECT_EQ(0U, da1.reference());
  void* mem1 = da1.Get();
  ASSERT_TRUE(mem1);
  EXPECT_NE(0U, da1.reference());
  EXPECT_EQ(allocator_->GetAsReference(mem1, 1001),
            ref1.load(std::memory_order_relaxed));
  allocator_->MakeIterable(da1.reference());
  EXPECT_NE(0U, iter.GetNext(&type));
  EXPECT_EQ(1001U, type);
  EXPECT_EQ(0U, iter.GetNext(&type));

  // The second delayed allocation behaves the same way.
  void* mem2a = da2a.Get();
  ASSERT_TRUE(mem2a);
  EXPECT_EQ(allocator_->GetAsReference(mem2a, 2002),
            ref2.load(std::memory_order_relaxed));
  allocator_->MakeIterable(da2a.reference());
  EXPECT_NE(0U, iter.GetNext(&type));
  EXPECT_EQ(2002U, type);
  EXPECT_EQ(0U, iter.GetNext(&type));

  // Because |da2b| shares |ref2|, its Get() is just a 5-byte offset into
  // the second allocation; no new persistent object appears.
  void* mem2b = da2b.Get();
  ASSERT_TRUE(mem2b);
  allocator_->MakeIterable(da2b.reference());
  EXPECT_EQ(0U, iter.GetNext(&type));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(mem2a) + 5,
            reinterpret_cast<uintptr_t>(mem2b));
}
543
544 // This test doesn't verify anything other than it doesn't crash. Its goal
545 // is to find coding errors that aren't otherwise tested for, much like a
546 // "fuzzer" would.
// This test is supposed to fail on TSAN bot (crbug.com/579867).
548 #if defined(THREAD_SANITIZER)
549 #define MAYBE_CorruptionTest DISABLED_CorruptionTest
550 #else
551 #define MAYBE_CorruptionTest CorruptionTest
552 #endif
TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
  char* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // While the workers allocate, scribble random bytes over random offsets
  // of the shared segment to provoke the corruption-detection paths.
  do {
    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
    char value = RandInt(0, 255);
    memory[offset] = value;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  // Iterate once more; success here simply means "no crash or hang".
  CountIterables();
}
581
582 // Attempt to cause crashes or loops by expressly creating dangerous conditions.
TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
  allocator_->MakeIterable(block1);
  allocator_->MakeIterable(block2);
  allocator_->MakeIterable(block3);
  allocator_->MakeIterable(block4);
  allocator_->MakeIterable(block5);
  EXPECT_EQ(5U, CountIterables());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Create loop in iterable list and ensure it doesn't hang. The return
  // value from CountIterables() in these cases is unpredictable. If there
  // is a failure, the call will hang and the test killed for taking too
  // long. Use a named cast (instead of a C-style cast) when reaching into
  // the raw block header; header4[3] holds the "next iterable" reference.
  uint32_t* header4 = reinterpret_cast<uint32_t*>(mem_segment_.get() + block4);
  EXPECT_EQ(block5, header4[3]);
  header4[3] = block4;
  CountIterables();  // loop: 1-2-3-4-4
  EXPECT_TRUE(allocator_->IsCorrupt());

  // Test where loop goes back to a previous block.
  header4[3] = block3;
  CountIterables();  // loop: 1-2-3-4-3

  // Test where loop goes back to the beginning.
  header4[3] = block1;
  CountIterables();  // loop: 1-2-3-4-1
}
614
615
616 //----- LocalPersistentMemoryAllocator -----------------------------------------
617
TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
  // A freshly-created local allocator reports the id it was given and can
  // immediately satisfy an allocation without being full or corrupt.
  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
  EXPECT_EQ(42U, allocator.Id());
  EXPECT_NE(0U, allocator.Allocate(24, 1));
  EXPECT_FALSE(allocator.IsFull());
  EXPECT_FALSE(allocator.IsCorrupt());
}
625
626 //----- {Writable,ReadOnly}SharedPersistentMemoryAllocator ---------------------
627
TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
  base::WritableSharedMemoryRegion rw_region =
      base::WritableSharedMemoryRegion::Create(TEST_MEMORY_SIZE);
  ASSERT_TRUE(rw_region.IsValid());

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    // Populate the shared segment through a scoped writable mapping.
    base::WritableSharedMemoryMapping mapping = rw_region.Map();
    ASSERT_TRUE(mapping.IsValid());
    WritableSharedPersistentMemoryAllocator local(std::move(mapping), TEST_ID,
                                                  "");
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456, false);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());
  }

  // Map the same region twice: once writable, once converted to read-only.
  base::WritableSharedMemoryMapping rw_mapping = rw_region.Map();
  ASSERT_TRUE(rw_mapping.IsValid());
  base::ReadOnlySharedMemoryRegion ro_region =
      base::WritableSharedMemoryRegion::ConvertToReadOnly(std::move(rw_region));
  ASSERT_TRUE(ro_region.IsValid());
  base::ReadOnlySharedMemoryMapping ro_mapping = ro_region.Map();
  ASSERT_TRUE(ro_mapping.IsValid());

  // A read-only allocator sees everything that was written above, including
  // the id stored in the segment itself.
  ReadOnlySharedPersistentMemoryAllocator shalloc2(std::move(ro_mapping), 0,
                                                   "");
  EXPECT_TRUE(shalloc2.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc2.Id());
  EXPECT_FALSE(shalloc2.IsFull());
  EXPECT_FALSE(shalloc2.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
  uint32_t type;
  EXPECT_EQ(r123, iter2.GetNext(&type));
  EXPECT_EQ(r789, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));

  EXPECT_EQ(123U, shalloc2.GetType(r123));
  EXPECT_EQ(654U, shalloc2.GetType(r456));
  EXPECT_EQ(789U, shalloc2.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  shalloc2.GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_EQ(meminfo1.free, meminfo2.free);

  // A read/write allocator over the second mapping sees the same contents.
  WritableSharedPersistentMemoryAllocator shalloc3(std::move(rw_mapping), 0,
                                                   "");
  EXPECT_FALSE(shalloc3.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc3.Id());
  EXPECT_FALSE(shalloc3.IsFull());
  EXPECT_FALSE(shalloc3.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
  EXPECT_EQ(r123, iter3.GetNext(&type));
  EXPECT_EQ(r789, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));

  EXPECT_EQ(123U, shalloc3.GetType(r123));
  EXPECT_EQ(654U, shalloc3.GetType(r456));
  EXPECT_EQ(789U, shalloc3.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo3;
  shalloc3.GetMemoryInfo(&meminfo3);
  EXPECT_EQ(meminfo1.total, meminfo3.total);
  EXPECT_EQ(meminfo1.free, meminfo3.free);

  // An object made iterable through the writable view becomes visible to
  // the read-only iterator.
  Reference obj = shalloc3.Allocate(42, 42);
  ASSERT_TRUE(obj);
  shalloc3.MakeIterable(obj);
  EXPECT_EQ(obj, iter2.GetNext(&type));
  EXPECT_EQ(42U, type);

  // ChangeType() must clear the object's data only when explicitly asked.
  Reference data_ref = shalloc3.Allocate(sizeof(int) * 4, 911);
  int* data = shalloc3.GetAsArray<int>(data_ref, 911, 4);
  ASSERT_TRUE(data);
  data[0] = 0;
  data[1] = 1;
  data[2] = 2;
  data[3] = 3;
  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 119, 911, false));
  EXPECT_EQ(0, data[0]);
  EXPECT_EQ(1, data[1]);
  EXPECT_EQ(2, data[2]);
  EXPECT_EQ(3, data[3]);
  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 191, 119, true));
  EXPECT_EQ(0, data[0]);
  EXPECT_EQ(0, data[1]);
  EXPECT_EQ(0, data[2]);
  EXPECT_EQ(0, data[3]);
}
732
733 #if !BUILDFLAG(IS_NACL)
734 //----- FilePersistentMemoryAllocator ------------------------------------------
735
TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("persistent_memory");

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    // Build a populated segment in local memory and write it out to disk.
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456, false);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    // Named cast instead of a C-style cast, per style guide.
    writer.Write(0, static_cast<const char*>(local.data()), local.used());
  }

  // Map the file back in; only the used portion was written out.
  auto mmfile = std::make_unique<MemoryMappedFile>();
  ASSERT_TRUE(mmfile->Initialize(file_path));
  EXPECT_TRUE(mmfile->IsValid());
  const size_t mmlength = mmfile->length();
  EXPECT_GE(meminfo1.total, mmlength);

  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "",
                                     FilePersistentMemoryAllocator::kReadWrite);
  EXPECT_FALSE(file.IsReadonly());
  EXPECT_EQ(TEST_ID, file.Id());
  EXPECT_FALSE(file.IsFull());
  EXPECT_FALSE(file.IsCorrupt());

  // The file-backed allocator must reproduce what the local one wrote.
  PersistentMemoryAllocator::Iterator iter(&file);
  uint32_t type;
  EXPECT_EQ(r123, iter.GetNext(&type));
  EXPECT_EQ(r789, iter.GetNext(&type));
  EXPECT_EQ(0U, iter.GetNext(&type));

  EXPECT_EQ(123U, file.GetType(r123));
  EXPECT_EQ(654U, file.GetType(r456));
  EXPECT_EQ(789U, file.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  file.GetMemoryInfo(&meminfo2);
  EXPECT_GE(meminfo1.total, meminfo2.total);
  EXPECT_GE(meminfo1.free, meminfo2.free);
  EXPECT_EQ(mmlength, meminfo2.total);
  EXPECT_EQ(0U, meminfo2.free);

  // There's no way of knowing if Flush actually does anything but at least
  // verify that it runs without CHECK violations.
  file.Flush(false);
  file.Flush(true);
}
796
TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("extend_test");
  MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.

  // Start with a small but valid file of persistent data.
  ASSERT_FALSE(PathExists(file_path));
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    local.Allocate(1, 1);
    local.Allocate(11, 11);

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    // Named cast instead of a C-style cast, per style guide.
    writer.Write(0, static_cast<const char*>(local.data()), local.used());
  }
  ASSERT_TRUE(PathExists(file_path));
  int64_t before_size;
  ASSERT_TRUE(GetFileSize(file_path, &before_size));

  // Map it as an extendable read/write file and append to it.
  {
    auto mmfile = std::make_unique<MemoryMappedFile>();
    ASSERT_TRUE(mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND));
    FilePersistentMemoryAllocator allocator(
        std::move(mmfile), region.size, 0, "",
        FilePersistentMemoryAllocator::kReadWrite);
    EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());

    allocator.Allocate(111, 111);
    EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
  }

  // Validate that the append actually grew the on-disk file.
  int64_t after_size;
  ASSERT_TRUE(GetFileSize(file_path, &after_size));
  EXPECT_LT(before_size, after_size);

  // Verify that the extended file is still acceptable in both modes.
  {
    auto mmfile = std::make_unique<MemoryMappedFile>();
    ASSERT_TRUE(mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND));
    EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
    EXPECT_TRUE(
        FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
  }
}
849
// Writes progressively shorter prefixes of (a) a valid allocator image and
// (b) pure garbage to files, maps each file, and verifies that
// IsFileAcceptable() plus construction/iteration of the allocator never
// crash, that short files are reported corrupt, and that garbage files are
// always reported corrupt.
TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
  const uint32_t kAllocAlignment =
      PersistentMemoryAllocatorTest::GetAllocAlignment();
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());

  // Build a small, valid allocator image in local memory to copy from.
  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
  local.MakeIterable(local.Allocate(1, 1));
  local.MakeIterable(local.Allocate(11, 11));
  const size_t minsize = local.used();
  auto garbage = std::make_unique<char[]>(minsize);
  RandBytes(garbage.get(), minsize);

  std::unique_ptr<MemoryMappedFile> mmfile;
  char filename[100];
  for (size_t filesize = minsize; filesize > 0; --filesize) {
    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
    FilePath file_path = temp_dir.GetPath().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, static_cast<const char*>(local.data()), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    // Request read/write access for some sizes that are a multiple of the
    // allocator's alignment size. The allocator is strict about file size
    // being a multiple of its internal alignment when doing read/write access.
    const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
    const uint32_t file_flags =
        File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
    const MemoryMappedFile::Access map_access =
        read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;

    mmfile = std::make_unique<MemoryMappedFile>();
    ASSERT_TRUE(mmfile->Initialize(File(file_path, file_flags), map_access));
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(
          std::move(mmfile), 0, 0, "",
          read_only ? FilePersistentMemoryAllocator::kReadOnly
                    : FilePersistentMemoryAllocator::kReadWrite);
      // Also make sure that iteration doesn't crash.
      PersistentMemoryAllocator::Iterator iter(&allocator);
      uint32_t type_id;
      Reference ref;
      while ((ref = iter.GetNext(&type_id)) != 0) {
        const char* data = allocator.GetAsArray<char>(
            ref, 0, PersistentMemoryAllocator::kSizeAny);
        uint32_t type = allocator.GetType(ref);
        size_t size = allocator.GetAllocSize(ref);
        // Ensure compiler can't optimize-out above variables.
        (void)data;
        (void)type;
        (void)size;
      }

      // Ensure that short files are detected as corrupt and full files are not.
      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_LT(filesize, minsize);
    }

    // Repeat with a file of the same size holding random garbage.
    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
    file_path = temp_dir.GetPath().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, static_cast<const char*>(garbage.get()), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    mmfile = std::make_unique<MemoryMappedFile>();
    ASSERT_TRUE(mmfile->Initialize(File(file_path, file_flags), map_access));
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(
          std::move(mmfile), 0, 0, "",
          read_only ? FilePersistentMemoryAllocator::kReadOnly
                    : FilePersistentMemoryAllocator::kReadWrite);
      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_LT(filesize, minsize);
    }
  }
}
948
// Writes a valid two-allocation segment to a file, then truncates the file
// to several lengths and checks that mapping it back (read-write and
// read-only) yields only the allocations that fit, flags short files as
// corrupt, and never alters the on-disk length.
TEST_F(PersistentMemoryAllocatorTest, TruncateTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("truncate_test");

  // Build a small valid segment with two iterable allocations, recording
  // the "used" watermark after each one.
  Reference first_ref;
  Reference second_ref;
  size_t used_after_first;
  size_t used_after_second;
  ASSERT_FALSE(PathExists(file_path));
  {
    LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, TEST_ID, "");
    first_ref = allocator.Allocate(100 << 10, 1);
    allocator.MakeIterable(first_ref);
    used_after_first = allocator.used();
    second_ref = allocator.Allocate(200 << 10, 11);
    allocator.MakeIterable(second_ref);
    used_after_second = allocator.used();

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, static_cast<const char*>(allocator.data()),
                 allocator.size());
  }
  ASSERT_TRUE(PathExists(file_path));
  // The second allocation must start at or beyond the first watermark.
  EXPECT_LE(used_after_first, second_ref);

  // Try the full length, a length cutting off the second allocation, and a
  // length cutting into the middle of the first allocation; each must be
  // readable with both read-write and read-only access.
  for (size_t file_length :
       {used_after_second, used_after_first, used_after_first / 2}) {
    SCOPED_TRACE(StringPrintf("file_length=%zu", file_length));
    SetFileLength(file_path, file_length);

    for (bool read_only : {false, true}) {
      SCOPED_TRACE(StringPrintf("read_only=%s", read_only ? "true" : "false"));

      const uint32_t open_flags =
          File::FLAG_OPEN | File::FLAG_READ |
          (read_only ? 0 : File::FLAG_WRITE);
      const MemoryMappedFile::Access access = read_only
                                                  ? MemoryMappedFile::READ_ONLY
                                                  : MemoryMappedFile::READ_WRITE;

      auto mapped = std::make_unique<MemoryMappedFile>();
      ASSERT_TRUE(mapped->Initialize(File(file_path, open_flags), access));
      ASSERT_TRUE(
          FilePersistentMemoryAllocator::IsFileAcceptable(*mapped, read_only));

      FilePersistentMemoryAllocator allocator(
          std::move(mapped), 0, 0, "",
          read_only ? FilePersistentMemoryAllocator::kReadOnly
                    : FilePersistentMemoryAllocator::kReadWrite);

      // Only allocations lying wholly within the file should be iterable.
      PersistentMemoryAllocator::Iterator iter(&allocator);
      uint32_t type_id;
      EXPECT_EQ(file_length >= used_after_first ? first_ref : 0U,
                iter.GetNext(&type_id));
      EXPECT_EQ(file_length >= used_after_second ? second_ref : 0U,
                iter.GetNext(&type_id));
      EXPECT_EQ(0U, iter.GetNext(&type_id));

      // Ensure that short files are detected as corrupt and full files are not.
      EXPECT_EQ(file_length < used_after_second, allocator.IsCorrupt());
    }

    // Ensure that file length was not adjusted.
    int64_t actual_length;
    ASSERT_TRUE(GetFileSize(file_path, &actual_length));
    EXPECT_EQ(file_length, static_cast<size_t>(actual_length));
  }
}
1018
1019 #endif // !BUILDFLAG(IS_NACL)
1020
1021 } // namespace base
1022