// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_memory_allocator.h"

#include <memory>

#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/files/memory_mapped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram.h"
#include "base/process/process_handle.h"
#include "base/rand_util.h"
#include "base/strings/safe_sprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/simple_thread.h"
#include "testing/gmock/include/gmock/gmock.h"

namespace {

const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
const uint32_t TEST_ID = 12345;
const char TEST_NAME[] = "TestAllocator";

}  // namespace

namespace base {

typedef PersistentMemoryAllocator::Reference Reference;

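// Test fixture that owns a raw memory segment and builds a fresh
// PersistentMemoryAllocator on top of it for every test.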
class PersistentMemoryAllocatorTest : public testing::Test {
 public:
  // This can't be statically initialized because its value isn't defined
  // in the PersistentMemoryAllocator header file. Instead, it's simply set
  // in the constructor.
  uint32_t kAllocAlignment;

  struct TestObject1 {
    int onething;
    char oranother;
  };

  struct TestObject2 {
    int thiis;
    long that;
    float andthe;
    char other;
    double thing;
  };

  PersistentMemoryAllocatorTest() {
    kAllocAlignment = GetAllocAlignment();
    mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
  }

  void SetUp() override {
    allocator_.reset();
    ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
    allocator_.reset(new PersistentMemoryAllocator(
        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
        TEST_ID, TEST_NAME, false));
    allocator_->CreateTrackingHistograms(allocator_->Name());
  }

  void TearDown() override {
    allocator_.reset();
  }

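  // Counts the allocations currently reachable through iteration; used by
  // the tests to verify that objects made iterable are actually visible.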
  unsigned CountIterables() {
    PersistentMemoryAllocator::Iterator iter(allocator_.get());
    uint32_t type;
    unsigned count = 0;
    while (iter.GetNext(&type) != 0) {
      ++count;
    }
    return count;
  }

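  // Exposes the allocator's private alignment constant so tests outside the
  // fixture (e.g. the file-acceptance test below) can use it.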
  static uint32_t GetAllocAlignment() {
    return PersistentMemoryAllocator::kAllocAlignment;
  }

 protected:
  std::unique_ptr<char[]> mem_segment_;
  std::unique_ptr<PersistentMemoryAllocator> allocator_;
};

TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
  std::string base_name(TEST_NAME);
  EXPECT_EQ(TEST_ID, allocator_->Id());
  EXPECT_TRUE(allocator_->used_histogram_);
  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
            allocator_->used_histogram_->histogram_name());
  EXPECT_TRUE(allocator_->allocs_histogram_);
  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".Allocs",
            allocator_->allocs_histogram_->histogram_name());

  // Get base memory info for later comparison.
  PersistentMemoryAllocator::MemoryInfo meminfo0;
  allocator_->GetMemoryInfo(&meminfo0);
  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
  EXPECT_GT(meminfo0.total, meminfo0.free);

  // Validate allocation of test object and make sure it can be referenced
  // and all metadata looks correct.
  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
  EXPECT_NE(0U, block1);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
            allocator_->GetAllocSize(block1));
  PersistentMemoryAllocator::MemoryInfo meminfo1;
  allocator_->GetMemoryInfo(&meminfo1);
  EXPECT_EQ(meminfo0.total, meminfo1.total);
  EXPECT_GT(meminfo0.free, meminfo1.free);

  // Ensure that the test-object can be made iterable.
  PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
  uint32_t type;
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  allocator_->MakeIterable(block1);
  EXPECT_EQ(block1, iter1a.GetNext(&type));
  EXPECT_EQ(1U, type);
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Create second test-object and ensure everything is good and it cannot
  // be confused with test-object of another type.
  Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
  EXPECT_NE(0U, block2);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
            allocator_->GetAllocSize(block2));
  PersistentMemoryAllocator::MemoryInfo meminfo2;
  allocator_->GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_GT(meminfo1.free, meminfo2.free);

  // Ensure that second test-object can also be made iterable.
  allocator_->MakeIterable(block2);
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(2U, type);
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Check that iteration can begin after an arbitrary location.
  PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
  EXPECT_EQ(block2, iter1b.GetNext(&type));
  EXPECT_EQ(0U, iter1b.GetNext(&type));

  // Ensure nothing has gone noticeably wrong.
  EXPECT_FALSE(allocator_->IsFull());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Check the internal histogram record of used memory.
  allocator_->UpdateTrackingHistograms();
  std::unique_ptr<HistogramSamples> used_samples(
      allocator_->used_histogram_->SnapshotSamples());
  EXPECT_TRUE(used_samples);
  EXPECT_EQ(1, used_samples->TotalCount());

  // Check the internal histogram record of allocation requests.
  std::unique_ptr<HistogramSamples> allocs_samples(
      allocator_->allocs_histogram_->SnapshotSamples());
  EXPECT_TRUE(allocs_samples);
  EXPECT_EQ(2, allocs_samples->TotalCount());
  EXPECT_EQ(0, allocs_samples->GetCount(0));
  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject2)));
#if !DCHECK_IS_ON()  // DCHECK builds will die at a NOTREACHED().
  EXPECT_EQ(0U, allocator_->Allocate(TEST_MEMORY_SIZE + 1, 0));
  allocs_samples = allocator_->allocs_histogram_->SnapshotSamples();
  EXPECT_EQ(3, allocs_samples->TotalCount());
  EXPECT_EQ(1, allocs_samples->GetCount(0));
#endif

  // Check that an object's type can be changed.
  EXPECT_EQ(2U, allocator_->GetType(block2));
  allocator_->ChangeType(block2, 3, 2);
  EXPECT_EQ(3U, allocator_->GetType(block2));
  allocator_->ChangeType(block2, 2, 3);
  EXPECT_EQ(2U, allocator_->GetType(block2));

  // Create second allocator (read/write) using the same memory segment.
  std::unique_ptr<PersistentMemoryAllocator> allocator2(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "", false));
  EXPECT_EQ(TEST_ID, allocator2->Id());
  EXPECT_FALSE(allocator2->used_histogram_);
  EXPECT_FALSE(allocator2->allocs_histogram_);
  EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);

  // Ensure that iteration and access through second allocator works.
  PersistentMemoryAllocator::Iterator iter2(allocator2.get());
  EXPECT_EQ(block1, iter2.GetNext(&type));
  EXPECT_EQ(block2, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));

  // Create a third allocator (read-only) using the same memory segment.
  std::unique_ptr<const PersistentMemoryAllocator> allocator3(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "", true));
  EXPECT_EQ(TEST_ID, allocator3->Id());
  EXPECT_FALSE(allocator3->used_histogram_);
  EXPECT_FALSE(allocator3->allocs_histogram_);

  // Ensure that iteration and access through third allocator works.
  PersistentMemoryAllocator::Iterator iter3(allocator3.get());
  EXPECT_EQ(block1, iter3.GetNext(&type));
  EXPECT_EQ(block2, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));

  // Ensure that GetNextOfType works.
  PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
  EXPECT_EQ(block2, iter1c.GetNextOfType(2));
  EXPECT_EQ(0U, iter1c.GetNextOfType(2));
}

TEST_F(PersistentMemoryAllocatorTest, PageTest) {
  // This allocation will go into the first memory page.
  Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
  EXPECT_LT(0U, block1);
  EXPECT_GT(TEST_MEMORY_PAGE, block1);

  // This allocation won't fit in same page as previous block.
  Reference block2 =
      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
  EXPECT_EQ(TEST_MEMORY_PAGE, block2);

  // This allocation will also require a new page.
  Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
  EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
}

// A simple thread that takes an allocator and repeatedly allocates random-
// sized chunks from it until no more can be done.
class AllocatorThread : public SimpleThread {
 public:
  AllocatorThread(const std::string& name,
                  void* base,
                  uint32_t size,
                  uint32_t page_size)
      : SimpleThread(name, Options()),
        count_(0),
        iterable_(0),
        allocator_(base, size, page_size, 0, std::string(), false) {}

  void Run() override {
    for (;;) {
      uint32_t size = RandInt(1, 99);
      uint32_t type = RandInt(100, 999);
      Reference block = allocator_.Allocate(size, type);
      if (!block)
        break;

      count_++;
      if (RandInt(0, 1)) {
        allocator_.MakeIterable(block);
        iterable_++;
      }
    }
  }

  unsigned iterable() { return iterable_; }
  unsigned count() { return count_; }

 private:
  unsigned count_;
  unsigned iterable_;
  PersistentMemoryAllocator allocator_;
};

// Test parallel allocation/iteration and ensure consistency across all
// instances.
TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
  void* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

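  // Iterate on the main thread while the workers allocate, until the segment
  // becomes full (or is detected as corrupt).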
  unsigned last_count = 0;
  do {
    unsigned count = CountIterables();
    EXPECT_LE(last_count, count);
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(CountIterables(),
            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
            t5.iterable());
}

// A simple thread that counts objects by iterating through an allocator.
class CounterThread : public SimpleThread {
 public:
  CounterThread(const std::string& name,
                PersistentMemoryAllocator::Iterator* iterator,
                Lock* lock,
                ConditionVariable* condition,
                bool* wake_up)
      : SimpleThread(name, Options()),
        iterator_(iterator),
        lock_(lock),
        condition_(condition),
        count_(0),
        wake_up_(wake_up) {}

  void Run() override {
    // Wait so all threads can start at approximately the same time.
    // Best performance comes from releasing a single worker which then
    // releases the next, etc., etc.
    {
      AutoLock autolock(*lock_);

      // Before calling Wait(), make sure that the wake up condition
      // has not already passed.  Also, since spurious signal events
      // are possible, check the condition in a while loop to make
      // sure that the wake up condition is met when this thread
      // returns from the Wait().
      // See usage comments in src/base/synchronization/condition_variable.h.
      while (!*wake_up_) {
        condition_->Wait();
        condition_->Signal();
      }
    }

    uint32_t type;
    while (iterator_->GetNext(&type) != 0) {
      ++count_;
    }
  }

  unsigned count() { return count_; }

 private:
  PersistentMemoryAllocator::Iterator* iterator_;
  Lock* lock_;
  ConditionVariable* condition_;
  unsigned count_;
  bool* wake_up_;

  DISALLOW_COPY_AND_ASSIGN(CounterThread);
};

// Ensure that parallel iteration returns the same number of objects as
// single-threaded iteration.
TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
  // Fill the memory segment with random allocations.
  unsigned iterable_count = 0;
  for (;;) {
    uint32_t size = RandInt(1, 99);
    uint32_t type = RandInt(100, 999);
    Reference block = allocator_->Allocate(size, type);
    if (!block)
      break;
    allocator_->MakeIterable(block);
    ++iterable_count;
  }
  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(iterable_count, CountIterables());

  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  Lock lock;
  ConditionVariable condition(&lock);
  bool wake_up = false;

  CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
  CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
  CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
  CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
  CounterThread t5("t5", &iter, &lock, &condition, &wake_up);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // Take the lock and set the wake up condition to true.  This helps to
  // avoid a race condition where the Signal() event is called before
  // all the threads have reached the Wait() and thus never get woken up.
  {
    AutoLock autolock(lock);
    wake_up = true;
  }

  // This will release all the waiting threads.
  condition.Signal();

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_EQ(iterable_count,
            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());

#if 0
  // These ensure that the threads don't run sequentially. It shouldn't be
  // enabled in general because it could lead to a flaky test if it happens
  // simply by chance but it is useful during development to ensure that the
  // test is working correctly.
  EXPECT_NE(iterable_count, t1.count());
  EXPECT_NE(iterable_count, t2.count());
  EXPECT_NE(iterable_count, t3.count());
  EXPECT_NE(iterable_count, t4.count());
  EXPECT_NE(iterable_count, t5.count());
#endif
}

// This test doesn't verify anything other than it doesn't crash. Its goal
// is to find coding errors that aren't otherwise tested for, much like a
// "fuzzer" would.
// This test is supposed to fail on the TSAN bot (crbug.com/579867).
#if defined(THREAD_SANITIZER)
#define MAYBE_CorruptionTest DISABLED_CorruptionTest
#else
#define MAYBE_CorruptionTest CorruptionTest
#endif
TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
  char* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

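  // Overwrite random bytes of the shared segment while the workers allocate,
  // until the allocator reports itself corrupt or full.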
  do {
    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
    char value = RandInt(0, 255);
    memory[offset] = value;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  CountIterables();
}

// Attempt to cause crashes or loops by expressly creating dangerous conditions.
TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
  allocator_->MakeIterable(block1);
  allocator_->MakeIterable(block2);
  allocator_->MakeIterable(block3);
  allocator_->MakeIterable(block4);
  allocator_->MakeIterable(block5);
  EXPECT_EQ(5U, CountIterables());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Create a loop in the iterable list and ensure it doesn't hang. The return
  // value from CountIterables() in these cases is unpredictable. If there is
  // a failure, the call will hang and the test will be killed for taking too
  // long.
  uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
  EXPECT_EQ(block5, header4[3]);
  header4[3] = block4;
  CountIterables();  // loop: 1-2-3-4-4
  EXPECT_TRUE(allocator_->IsCorrupt());

  // Test where the loop goes back to the previous block.
  header4[3] = block3;
  CountIterables();  // loop: 1-2-3-4-3

  // Test where the loop goes back to the beginning.
  header4[3] = block1;
  CountIterables();  // loop: 1-2-3-4-1
}


//----- LocalPersistentMemoryAllocator -----------------------------------------

TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
  EXPECT_EQ(42U, allocator.Id());
  EXPECT_NE(0U, allocator.Allocate(24, 1));
  EXPECT_FALSE(allocator.IsFull());
  EXPECT_FALSE(allocator.IsCorrupt());
}


//----- SharedPersistentMemoryAllocator ----------------------------------------

TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
  SharedMemoryHandle shared_handle_1;
  SharedMemoryHandle shared_handle_2;

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
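  // Build an allocator on top of anonymous shared memory, populate it, and
  // duplicate the handle twice so the same segment can be re-mapped read-only
  // and read/write below.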
  {
    std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
    ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
    SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
                                          false);
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
                                                      &shared_handle_1));
    ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
                                                      &shared_handle_2));
  }

  // Read-only test.
  std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
                                                        /*readonly=*/true));
  ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));

  SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
  EXPECT_TRUE(shalloc2.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc2.Id());
  EXPECT_FALSE(shalloc2.IsFull());
  EXPECT_FALSE(shalloc2.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
  uint32_t type;
  EXPECT_EQ(r123, iter2.GetNext(&type));
  EXPECT_EQ(r789, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));

  EXPECT_EQ(123U, shalloc2.GetType(r123));
  EXPECT_EQ(654U, shalloc2.GetType(r456));
  EXPECT_EQ(789U, shalloc2.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  shalloc2.GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_EQ(meminfo1.free, meminfo2.free);

  // Read/write test.
  std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
                                                        /*readonly=*/false));
  ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));

  SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
  EXPECT_FALSE(shalloc3.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc3.Id());
  EXPECT_FALSE(shalloc3.IsFull());
  EXPECT_FALSE(shalloc3.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
  EXPECT_EQ(r123, iter3.GetNext(&type));
  EXPECT_EQ(r789, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));

  EXPECT_EQ(123U, shalloc3.GetType(r123));
  EXPECT_EQ(654U, shalloc3.GetType(r456));
  EXPECT_EQ(789U, shalloc3.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo3;
  shalloc3.GetMemoryInfo(&meminfo3);
  EXPECT_EQ(meminfo1.total, meminfo3.total);
  EXPECT_EQ(meminfo1.free, meminfo3.free);

  // Interconnectivity test.
  Reference obj = shalloc3.Allocate(42, 42);
  ASSERT_TRUE(obj);
  shalloc3.MakeIterable(obj);
  EXPECT_EQ(obj, iter2.GetNext(&type));
  EXPECT_EQ(42U, type);
}


#if !defined(OS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------

TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.path().AppendASCII("persistent_memory");

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
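  // Populate a local allocator and write its used bytes out to a file so the
  // file-backed allocator below can map and read them back.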
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  mmfile->Initialize(file_path);
  EXPECT_TRUE(mmfile->IsValid());
  const size_t mmlength = mmfile->length();
  EXPECT_GE(meminfo1.total, mmlength);

  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
  EXPECT_TRUE(file.IsReadonly());
  EXPECT_EQ(TEST_ID, file.Id());
  EXPECT_FALSE(file.IsFull());
  EXPECT_FALSE(file.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter(&file);
  uint32_t type;
  EXPECT_EQ(r123, iter.GetNext(&type));
  EXPECT_EQ(r789, iter.GetNext(&type));
  EXPECT_EQ(0U, iter.GetNext(&type));

  EXPECT_EQ(123U, file.GetType(r123));
  EXPECT_EQ(654U, file.GetType(r456));
  EXPECT_EQ(789U, file.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  file.GetMemoryInfo(&meminfo2);
  EXPECT_GE(meminfo1.total, meminfo2.total);
  EXPECT_GE(meminfo1.free, meminfo2.free);
  EXPECT_EQ(mmlength, meminfo2.total);
  EXPECT_EQ(0U, meminfo2.free);
}

TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.path().AppendASCII("extend_test");
  MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.

  // Start with a small but valid file of persistent data.
  ASSERT_FALSE(PathExists(file_path));
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    local.Allocate(1, 1);
    local.Allocate(11, 11);

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }
  ASSERT_TRUE(PathExists(file_path));
  int64_t before_size;
  ASSERT_TRUE(GetFileSize(file_path, &before_size));

  // Map it as an extendable read/write file and append to it.
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND);
    FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
                                            "", false);
    EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());

    allocator.Allocate(111, 111);
    EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
  }

  // Validate that append worked.
  int64_t after_size;
  ASSERT_TRUE(GetFileSize(file_path, &after_size));
  EXPECT_LT(before_size, after_size);

  // Verify that it's still an acceptable file.
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND);
    EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
    EXPECT_TRUE(
        FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
  }
}

TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
  const uint32_t kAllocAlignment =
      PersistentMemoryAllocatorTest::GetAllocAlignment();
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());

  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
  local.MakeIterable(local.Allocate(1, 1));
  local.MakeIterable(local.Allocate(11, 11));
  const size_t minsize = local.used();
  std::unique_ptr<char[]> garbage(new char[minsize]);
  RandBytes(garbage.get(), minsize);

  std::unique_ptr<MemoryMappedFile> mmfile;
  char filename[100];
  for (size_t filesize = minsize; filesize > 0; --filesize) {
    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
    FilePath file_path = temp_dir.path().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)local.data(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    // Request read/write access for some sizes that are a multiple of the
    // allocator's alignment size. The allocator is strict about the file size
    // being a multiple of its internal alignment when doing read/write access.
    const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
    const uint32_t file_flags =
        File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
    const MemoryMappedFile::Access map_access =
        read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;

    mmfile.reset(new MemoryMappedFile());
    mmfile->Initialize(File(file_path, file_flags), map_access);
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
                                              read_only);
      // Also make sure that iteration doesn't crash.
      PersistentMemoryAllocator::Iterator iter(&allocator);
      uint32_t type_id;
      Reference ref;
      while ((ref = iter.GetNext(&type_id)) != 0) {
        const char* data = allocator.GetAsObject<char>(ref, 0);
        uint32_t type = allocator.GetType(ref);
        size_t size = allocator.GetAllocSize(ref);
        // Ensure the compiler can't optimize out the above variables.
        (void)data;
        (void)type;
        (void)size;
      }

      // Ensure that short files are detected as corrupt and full files are not.
      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_LT(filesize, minsize);
    }

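    // Repeat with a file of the same size filled with random garbage. It may
    // still be acceptable for mapping, but the allocator must detect the
    // corruption.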
    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
    file_path = temp_dir.path().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)garbage.get(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    mmfile.reset(new MemoryMappedFile());
    mmfile->Initialize(File(file_path, file_flags), map_access);
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
                                              read_only);
      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_GT(minsize, filesize);
    }
  }
}
#endif  // !defined(OS_NACL)

}  // namespace base