// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_memory_allocator.h"

#include <memory>

#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/files/memory_mapped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/safe_sprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/simple_thread.h"
#include "testing/gmock/include/gmock/gmock.h"

namespace {

const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
const uint32_t TEST_ID = 12345;
const char TEST_NAME[] = "TestAllocator";

}  // namespace

namespace base {

typedef PersistentMemoryAllocator::Reference Reference;

class PersistentMemoryAllocatorTest : public testing::Test {
 public:
  // This can't be statically initialized because its value isn't defined
  // in the PersistentMemoryAllocator header file. Instead, it's simply set
  // in the constructor.
  uint32_t kAllocAlignment;

  struct TestObject1 {
    static constexpr uint32_t kPersistentTypeId = 1;
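    // Expected size: 4 bytes for the int32_t, 1 for the char, plus 3 bytes of
    // tail padding to keep the struct 4-byte aligned.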
    static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
    int32_t onething;
    char oranother;
  };

  struct TestObject2 {
    static constexpr uint32_t kPersistentTypeId = 2;
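    // Expected size: 8 (int64_t) + 4 (int32_t) + 4 (float) + 8 (double) +
    // 8 (char[8]) = 32 bytes, with no additional padding required.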
    static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
    int64_t thiis;
    int32_t that;
    float andthe;
    double other;
    char thing[8];
  };

  PersistentMemoryAllocatorTest() {
    kAllocAlignment = GetAllocAlignment();
    mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
  }

  void SetUp() override {
    allocator_.reset();
    ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
    allocator_.reset(new PersistentMemoryAllocator(
        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
        TEST_ID, TEST_NAME, false));
  }

  void TearDown() override {
    allocator_.reset();
  }

  unsigned CountIterables() {
    PersistentMemoryAllocator::Iterator iter(allocator_.get());
    uint32_t type;
    unsigned count = 0;
    while (iter.GetNext(&type) != 0) {
      ++count;
    }
    return count;
  }

  static uint32_t GetAllocAlignment() {
    return PersistentMemoryAllocator::kAllocAlignment;
  }

 protected:
  std::unique_ptr<char[]> mem_segment_;
  std::unique_ptr<PersistentMemoryAllocator> allocator_;
};

TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
  allocator_->CreateTrackingHistograms(allocator_->Name());

  std::string base_name(TEST_NAME);
  EXPECT_EQ(TEST_ID, allocator_->Id());
  EXPECT_TRUE(allocator_->used_histogram_);
  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
            allocator_->used_histogram_->histogram_name());
  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
            allocator_->GetMemoryState());

  // Get base memory info for later comparison.
  PersistentMemoryAllocator::MemoryInfo meminfo0;
  allocator_->GetMemoryInfo(&meminfo0);
  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
  EXPECT_GT(meminfo0.total, meminfo0.free);

  // Validate allocation of test object and make sure it can be referenced
  // and all metadata looks correct.
  TestObject1* obj1 = allocator_->New<TestObject1>();
  ASSERT_TRUE(obj1);
  Reference block1 = allocator_->GetAsReference(obj1);
  ASSERT_NE(0U, block1);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
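  // Allocations are rounded up to the allocator's alignment, so the recorded
  // size should be at least sizeof(TestObject1) but less than one alignment
  // unit larger, as the next two checks verify.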
  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
            allocator_->GetAllocSize(block1));
  PersistentMemoryAllocator::MemoryInfo meminfo1;
  allocator_->GetMemoryInfo(&meminfo1);
  EXPECT_EQ(meminfo0.total, meminfo1.total);
  EXPECT_GT(meminfo0.free, meminfo1.free);

  // Verify that pointers can be turned back into references and that invalid
  // addresses return null.
  char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
  ASSERT_TRUE(memory1);
  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));

  // Ensure that the test-object can be made iterable.
  PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
  EXPECT_EQ(0U, iter1a.GetLast());
  uint32_t type;
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  allocator_->MakeIterable(block1);
  EXPECT_EQ(block1, iter1a.GetNext(&type));
  EXPECT_EQ(1U, type);
  EXPECT_EQ(block1, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  EXPECT_EQ(block1, iter1a.GetLast());

  // Create a second test-object and ensure everything is good and that it
  // cannot be confused with a test-object of another type.
  TestObject2* obj2 = allocator_->New<TestObject2>();
  ASSERT_TRUE(obj2);
  Reference block2 = allocator_->GetAsReference(obj2);
  ASSERT_NE(0U, block2);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
            allocator_->GetAllocSize(block2));
  PersistentMemoryAllocator::MemoryInfo meminfo2;
  allocator_->GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_GT(meminfo1.free, meminfo2.free);

  // Ensure that second test-object can also be made iterable.
  allocator_->MakeIterable(obj2);
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(2U, type);
  EXPECT_EQ(block2, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  EXPECT_EQ(block2, iter1a.GetLast());

  // Check that the iterator can be reset to the beginning.
  iter1a.Reset();
  EXPECT_EQ(0U, iter1a.GetLast());
  EXPECT_EQ(block1, iter1a.GetNext(&type));
  EXPECT_EQ(block1, iter1a.GetLast());
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(block2, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Check that the iterator can be reset to an arbitrary location.
  iter1a.Reset(block1);
  EXPECT_EQ(block1, iter1a.GetLast());
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(block2, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Check that iteration can begin after an arbitrary location.
  PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
  EXPECT_EQ(block2, iter1b.GetNext(&type));
  EXPECT_EQ(0U, iter1b.GetNext(&type));

  // Ensure nothing has gone noticeably wrong.
  EXPECT_FALSE(allocator_->IsFull());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Check the internal histogram record of used memory.
  allocator_->UpdateTrackingHistograms();
  std::unique_ptr<HistogramSamples> used_samples(
      allocator_->used_histogram_->SnapshotSamples());
  EXPECT_TRUE(used_samples);
  EXPECT_EQ(1, used_samples->TotalCount());

  // Check that an object's type can be changed.
  EXPECT_EQ(2U, allocator_->GetType(block2));
  allocator_->ChangeType(block2, 3, 2, false);
  EXPECT_EQ(3U, allocator_->GetType(block2));
  allocator_->New<TestObject2>(block2, 3, false);
  EXPECT_EQ(2U, allocator_->GetType(block2));

  // Create second allocator (read/write) using the same memory segment.
  std::unique_ptr<PersistentMemoryAllocator> allocator2(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "", false));
  EXPECT_EQ(TEST_ID, allocator2->Id());
  EXPECT_FALSE(allocator2->used_histogram_);

  // Ensure that iteration and access through second allocator works.
  PersistentMemoryAllocator::Iterator iter2(allocator2.get());
  EXPECT_EQ(block1, iter2.GetNext(&type));
  EXPECT_EQ(block2, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));

  // Create a third allocator (read-only) using the same memory segment.
  std::unique_ptr<const PersistentMemoryAllocator> allocator3(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "", true));
  EXPECT_EQ(TEST_ID, allocator3->Id());
  EXPECT_FALSE(allocator3->used_histogram_);

  // Ensure that iteration and access through third allocator works.
  PersistentMemoryAllocator::Iterator iter3(allocator3.get());
  EXPECT_EQ(block1, iter3.GetNext(&type));
  EXPECT_EQ(block2, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));

  // Ensure that GetNextOfType works.
  PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
  EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
  EXPECT_EQ(0U, iter1c.GetNextOfType(2));

  // Ensure that GetNextOfObject works.
  PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
  EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
  EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());

  // Ensure that deleting an object works.
  allocator_->Delete(obj2);
  PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
  EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());

  // Ensure that the memory state can be set.
  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
            allocator_->GetMemoryState());
}

TEST_F(PersistentMemoryAllocatorTest, PageTest) {
  // This allocation will go into the first memory page.
  Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
  EXPECT_LT(0U, block1);
  EXPECT_GT(TEST_MEMORY_PAGE, block1);

  // This allocation won't fit in the same page as the previous block.
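  // Because it doesn't fit, it should instead begin at the start of the
  // following page, i.e. its reference should equal TEST_MEMORY_PAGE, as the
  // EXPECT below verifies.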
  Reference block2 =
      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
  EXPECT_EQ(TEST_MEMORY_PAGE, block2);

  // This allocation will also require a new page.
  Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
  EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
}

// A simple thread that takes an allocator and repeatedly allocates random-
// sized chunks from it until no more can be done.
class AllocatorThread : public SimpleThread {
 public:
  AllocatorThread(const std::string& name,
                  void* base,
                  uint32_t size,
                  uint32_t page_size)
      : SimpleThread(name, Options()),
        count_(0),
        iterable_(0),
        allocator_(base, size, page_size, 0, std::string(), false) {}

  void Run() override {
    for (;;) {
      uint32_t size = RandInt(1, 99);
      uint32_t type = RandInt(100, 999);
      Reference block = allocator_.Allocate(size, type);
      if (!block)
        break;

      count_++;
      if (RandInt(0, 1)) {
        allocator_.MakeIterable(block);
        iterable_++;
      }
    }
  }

  unsigned iterable() { return iterable_; }
  unsigned count() { return count_; }

 private:
  unsigned count_;
  unsigned iterable_;
  PersistentMemoryAllocator allocator_;
};

// Test parallel allocation/iteration and ensure consistency across all
// instances.
TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
  void* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // The number of iterable blocks must never decrease while the threads run.
  unsigned last_count = 0;
  do {
    unsigned count = CountIterables();
    EXPECT_LE(last_count, count);
    last_count = count;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(CountIterables(),
            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
                t5.iterable());
}

// A simple thread that counts objects by iterating through an allocator.
class CounterThread : public SimpleThread {
 public:
  CounterThread(const std::string& name,
                PersistentMemoryAllocator::Iterator* iterator,
                Lock* lock,
                ConditionVariable* condition,
                bool* wake_up)
      : SimpleThread(name, Options()),
        iterator_(iterator),
        lock_(lock),
        condition_(condition),
        count_(0),
        wake_up_(wake_up) {}

  void Run() override {
    // Wait so all threads can start at approximately the same time.
    // Best performance comes from releasing a single worker which then
    // releases the next, etc., etc.
    {
      AutoLock autolock(*lock_);

      // Before calling Wait(), make sure that the wake up condition
      // has not already passed. Also, since spurious signal events
      // are possible, check the condition in a while loop to make
      // sure that the wake up condition is met when this thread
      // returns from the Wait().
      // See usage comments in src/base/synchronization/condition_variable.h.
      while (!*wake_up_) {
        condition_->Wait();
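        // Relay the wake-up to any other thread that is still waiting.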
        condition_->Signal();
      }
    }

    uint32_t type;
    while (iterator_->GetNext(&type) != 0) {
      ++count_;
    }
  }

  unsigned count() { return count_; }

 private:
  PersistentMemoryAllocator::Iterator* iterator_;
  Lock* lock_;
  ConditionVariable* condition_;
  unsigned count_;
  bool* wake_up_;

  DISALLOW_COPY_AND_ASSIGN(CounterThread);
};

// Ensure that parallel iteration returns the same number of objects as
// single-threaded iteration.
TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
  // Fill the memory segment with random allocations.
  unsigned iterable_count = 0;
  for (;;) {
    uint32_t size = RandInt(1, 99);
    uint32_t type = RandInt(100, 999);
    Reference block = allocator_->Allocate(size, type);
    if (!block)
      break;
    allocator_->MakeIterable(block);
    ++iterable_count;
  }
  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(iterable_count, CountIterables());

  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  Lock lock;
  ConditionVariable condition(&lock);
  bool wake_up = false;

  CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
  CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
  CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
  CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
  CounterThread t5("t5", &iter, &lock, &condition, &wake_up);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // Take the lock and set the wake up condition to true. This helps to
  // avoid a race condition where the Signal() event is called before
  // all the threads have reached the Wait() and thus never get woken up.
  {
    AutoLock autolock(lock);
    wake_up = true;
  }

  // This will release all the waiting threads.
  condition.Signal();

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_EQ(iterable_count,
            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());

#if 0
  // These checks ensure that the threads don't run sequentially. They
  // shouldn't be enabled in general because they could make the test flaky
  // simply by chance, but they are useful during development to confirm
  // that the test is working correctly.
  EXPECT_NE(iterable_count, t1.count());
  EXPECT_NE(iterable_count, t2.count());
  EXPECT_NE(iterable_count, t3.count());
  EXPECT_NE(iterable_count, t4.count());
  EXPECT_NE(iterable_count, t5.count());
#endif
}

// This test doesn't verify anything other than that it doesn't crash. Its
// goal is to find coding errors that aren't otherwise tested for, much like
// a "fuzzer" would.
// This test is known to fail under ThreadSanitizer, so it is disabled on the
// TSAN bot (crbug.com/579867).
#if defined(THREAD_SANITIZER)
#define MAYBE_CorruptionTest DISABLED_CorruptionTest
#else
#define MAYBE_CorruptionTest CorruptionTest
#endif
TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
  char* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  do {
    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
    char value = RandInt(0, 255);
    memory[offset] = value;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  CountIterables();
}

// Attempt to cause crashes or loops by expressly creating dangerous
// conditions.
TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
  allocator_->MakeIterable(block1);
  allocator_->MakeIterable(block2);
  allocator_->MakeIterable(block3);
  allocator_->MakeIterable(block4);
  allocator_->MakeIterable(block5);
  EXPECT_EQ(5U, CountIterables());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Create a loop in the iterable list and ensure it doesn't hang. The return
  // value from CountIterables() in these cases is unpredictable. If there is
  // a failure, the call will hang and the test will be killed for taking too
  // long.
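  // The test treats block4's header as an array of uint32_t and relies on its
  // fourth word being the link to the next iterable block; the EXPECT below
  // confirms that before the link is overwritten to create the loops.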
  uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
  EXPECT_EQ(block5, header4[3]);
  header4[3] = block4;
  CountIterables();  // loop: 1-2-3-4-4
  EXPECT_TRUE(allocator_->IsCorrupt());

  // Test where loop goes back to previous block.
  header4[3] = block3;
  CountIterables();  // loop: 1-2-3-4-3

  // Test where loop goes back to the beginning.
  header4[3] = block1;
  CountIterables();  // loop: 1-2-3-4-1
}


//----- LocalPersistentMemoryAllocator -----------------------------------------

TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
  EXPECT_EQ(42U, allocator.Id());
  EXPECT_NE(0U, allocator.Allocate(24, 1));
  EXPECT_FALSE(allocator.IsFull());
  EXPECT_FALSE(allocator.IsCorrupt());
}


//----- SharedPersistentMemoryAllocator ----------------------------------------

TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
  SharedMemoryHandle shared_handle_1;
  SharedMemoryHandle shared_handle_2;

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
    ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
    SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
                                          false);
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456, false);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
                                                      &shared_handle_1));
    ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
                                                      &shared_handle_2));
  }

  // Read-only test.
  std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
                                                        /*readonly=*/true));
  ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));

  SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
  EXPECT_TRUE(shalloc2.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc2.Id());
  EXPECT_FALSE(shalloc2.IsFull());
  EXPECT_FALSE(shalloc2.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
  uint32_t type;
  EXPECT_EQ(r123, iter2.GetNext(&type));
  EXPECT_EQ(r789, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));

  EXPECT_EQ(123U, shalloc2.GetType(r123));
  EXPECT_EQ(654U, shalloc2.GetType(r456));
  EXPECT_EQ(789U, shalloc2.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  shalloc2.GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_EQ(meminfo1.free, meminfo2.free);

  // Read/write test.
  std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
                                                        /*readonly=*/false));
  ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));

  SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
  EXPECT_FALSE(shalloc3.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc3.Id());
  EXPECT_FALSE(shalloc3.IsFull());
  EXPECT_FALSE(shalloc3.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
  EXPECT_EQ(r123, iter3.GetNext(&type));
  EXPECT_EQ(r789, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));

  EXPECT_EQ(123U, shalloc3.GetType(r123));
  EXPECT_EQ(654U, shalloc3.GetType(r456));
  EXPECT_EQ(789U, shalloc3.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo3;
  shalloc3.GetMemoryInfo(&meminfo3);
  EXPECT_EQ(meminfo1.total, meminfo3.total);
  EXPECT_EQ(meminfo1.free, meminfo3.free);

  // Interconnectivity test.
  Reference obj = shalloc3.Allocate(42, 42);
  ASSERT_TRUE(obj);
  shalloc3.MakeIterable(obj);
  EXPECT_EQ(obj, iter2.GetNext(&type));
  EXPECT_EQ(42U, type);

  // Clear-on-change test.
  Reference data_ref = shalloc3.Allocate(sizeof(int) * 4, 911);
  int* data = shalloc3.GetAsArray<int>(data_ref, 911, 4);
  ASSERT_TRUE(data);
  data[0] = 0;
  data[1] = 1;
  data[2] = 2;
  data[3] = 3;
  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 119, 911, false));
  EXPECT_EQ(0, data[0]);
  EXPECT_EQ(1, data[1]);
  EXPECT_EQ(2, data[2]);
  EXPECT_EQ(3, data[3]);
  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 191, 119, true));
  EXPECT_EQ(0, data[0]);
  EXPECT_EQ(0, data[1]);
  EXPECT_EQ(0, data[2]);
  EXPECT_EQ(0, data[3]);
}


#if !defined(OS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------

TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("persistent_memory");

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456, false);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  mmfile->Initialize(file_path);
  EXPECT_TRUE(mmfile->IsValid());
  const size_t mmlength = mmfile->length();
  EXPECT_GE(meminfo1.total, mmlength);

  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
  EXPECT_FALSE(file.IsReadonly());
  EXPECT_EQ(TEST_ID, file.Id());
  EXPECT_FALSE(file.IsFull());
  EXPECT_FALSE(file.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter(&file);
  uint32_t type;
  EXPECT_EQ(r123, iter.GetNext(&type));
  EXPECT_EQ(r789, iter.GetNext(&type));
  EXPECT_EQ(0U, iter.GetNext(&type));

  EXPECT_EQ(123U, file.GetType(r123));
  EXPECT_EQ(654U, file.GetType(r456));
  EXPECT_EQ(789U, file.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  file.GetMemoryInfo(&meminfo2);
  EXPECT_GE(meminfo1.total, meminfo2.total);
  EXPECT_GE(meminfo1.free, meminfo2.free);
  EXPECT_EQ(mmlength, meminfo2.total);
  EXPECT_EQ(0U, meminfo2.free);

  // There's no way of knowing whether Flush() actually does anything, but at
  // least verify that it runs without CHECK violations.
  file.Flush(false);
  file.Flush(true);
}

TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("extend_test");
  MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.

  // Start with a small but valid file of persistent data.
  ASSERT_FALSE(PathExists(file_path));
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    local.Allocate(1, 1);
    local.Allocate(11, 11);

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }
  ASSERT_TRUE(PathExists(file_path));
  int64_t before_size;
  ASSERT_TRUE(GetFileSize(file_path, &before_size));

  // Map it as an extendable read/write file and append to it.
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND);
    FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
                                            "", false);
    EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());

    allocator.Allocate(111, 111);
    EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
  }

  // Validate that append worked.
  int64_t after_size;
  ASSERT_TRUE(GetFileSize(file_path, &after_size));
  EXPECT_LT(before_size, after_size);

  // Verify that it's still an acceptable file.
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND);
    EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
    EXPECT_TRUE(
        FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
  }
}

TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
  const uint32_t kAllocAlignment =
      PersistentMemoryAllocatorTest::GetAllocAlignment();
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());

  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
  local.MakeIterable(local.Allocate(1, 1));
  local.MakeIterable(local.Allocate(11, 11));
  const size_t minsize = local.used();
  std::unique_ptr<char[]> garbage(new char[minsize]);
  RandBytes(garbage.get(), minsize);

  std::unique_ptr<MemoryMappedFile> mmfile;
  char filename[100];
  for (size_t filesize = minsize; filesize > 0; --filesize) {
    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
    FilePath file_path = temp_dir.GetPath().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)local.data(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    // Request read/write access for some sizes that are a multiple of the
    // allocator's alignment size. The allocator is strict about the file size
    // being a multiple of its internal alignment when doing read/write access.
    const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
    const uint32_t file_flags =
        File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
    const MemoryMappedFile::Access map_access =
        read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;

    mmfile.reset(new MemoryMappedFile());
    mmfile->Initialize(File(file_path, file_flags), map_access);
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
                                              read_only);
      // Also make sure that iteration doesn't crash.
      PersistentMemoryAllocator::Iterator iter(&allocator);
      uint32_t type_id;
      Reference ref;
      while ((ref = iter.GetNext(&type_id)) != 0) {
        const char* data = allocator.GetAsArray<char>(
            ref, 0, PersistentMemoryAllocator::kSizeAny);
        uint32_t type = allocator.GetType(ref);
        size_t size = allocator.GetAllocSize(ref);
        // Ensure compiler can't optimize-out above variables.
        (void)data;
        (void)type;
        (void)size;
      }

      // Ensure that short files are detected as corrupt and full files are not.
      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_LT(filesize, minsize);
    }

    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
    file_path = temp_dir.GetPath().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)garbage.get(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    mmfile.reset(new MemoryMappedFile());
    mmfile->Initialize(File(file_path, file_flags), map_access);
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
                                              read_only);
      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_GT(minsize, filesize);
    }
  }
}
#endif  // !defined(OS_NACL)

}  // namespace base