// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#include <fcntl.h>
#include <stdint.h>

#include <algorithm>

#include "base/files/scoped_file.h"
#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/process_memory_dump.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

24 namespace base {
25 
26 class TestDiscardableSharedMemory : public DiscardableSharedMemory {
27  public:
28   TestDiscardableSharedMemory() = default;
29 
TestDiscardableSharedMemory(UnsafeSharedMemoryRegion region)30   explicit TestDiscardableSharedMemory(UnsafeSharedMemoryRegion region)
31       : DiscardableSharedMemory(std::move(region)) {}
32 
SetNow(Time now)33   void SetNow(Time now) { now_ = now; }
34 
35  private:
36   // Overriden from DiscardableSharedMemory:
Now() const37   Time Now() const override { return now_; }
38 
39   Time now_;
40 };
41 
// Verifies that a freshly created segment is mapped and starts out locked.
TEST(DiscardableSharedMemoryTest, CreateAndMap) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);
  // The mapped size may be rounded up (e.g. to a page boundary), so it is
  // allowed to exceed the requested size.
  EXPECT_GE(memory.mapped_size(), kDataSize);
  EXPECT_TRUE(memory.IsMemoryLocked());
}
51 
// Verifies that a second instance created from a duplicated region maps the
// same segment and observes it as locked.
TEST(DiscardableSharedMemoryTest, CreateFromHandle) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);
  EXPECT_TRUE(memory2.IsMemoryLocked());
}
67 
// Exercises lock/unlock transitions across two instances sharing one segment:
// the lock state is shared, so either instance can lock or unlock it.
TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  // Memory is initially locked. Unlock it.
  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory1.Unlock(0, 0);
  EXPECT_FALSE(memory1.IsMemoryLocked());

  // Lock and unlock memory.
  DiscardableSharedMemory::LockResult lock_rv = memory1.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory1.Unlock(0, 0);

  // Lock again before duplicating and passing ownership to new instance.
  lock_rv = memory1.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
  EXPECT_TRUE(memory1.IsMemoryLocked());

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  // Unlock second instance.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
  memory2.Unlock(0, 0);

  // Both memory instances should be unlocked now.
  EXPECT_FALSE(memory2.IsMemoryLocked());
  EXPECT_FALSE(memory1.IsMemoryLocked());

  // Lock second instance before passing ownership back to first instance.
  lock_rv = memory2.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);

  // Memory should still be resident and locked.
  rv = memory1.IsMemoryResident();
  EXPECT_TRUE(rv);
  EXPECT_TRUE(memory1.IsMemoryLocked());

  // Unlock first instance.
  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(4));
  memory1.Unlock(0, 0);
}
119 
// Verifies Purge() semantics: it fails while the segment is locked or while
// the caller's last-known-usage timestamp is stale, succeeds once unlocked
// with a current timestamp, and makes subsequent Lock() attempts fail.
TEST(DiscardableSharedMemoryTest, Purge) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  // This should fail as memory is locked.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(1));
  EXPECT_FALSE(rv);

  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory2.Unlock(0, 0);

  ASSERT_TRUE(memory2.IsMemoryResident());

  // Memory is unlocked, but our usage timestamp is incorrect.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(3));
  EXPECT_FALSE(rv);

  ASSERT_TRUE(memory2.IsMemoryResident());

  // Memory is unlocked and our usage timestamp should be correct.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(4));
  EXPECT_TRUE(rv);

  // Lock should fail as memory has been purged.
  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);

  ASSERT_FALSE(memory2.IsMemoryResident());
}
159 
// Verifies that Purge() remains safe (and succeeds) after the underlying
// handle has been Close()d while the mapping is still in place.
TEST(DiscardableSharedMemoryTest, PurgeAfterClose) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  // Unlock things so we can Purge().
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory.Unlock(0, 0);

  // It should be safe to Purge() |memory| after Close()ing the handle.
  memory.Close();
  rv = memory.Purge(Time::FromSecondsSinceUnixEpoch(4));
  EXPECT_TRUE(rv);
}
176 
TEST(DiscardableSharedMemoryTest,LastUsed)177 TEST(DiscardableSharedMemoryTest, LastUsed) {
178   const uint32_t kDataSize = 1024;
179 
180   TestDiscardableSharedMemory memory1;
181   bool rv = memory1.CreateAndMap(kDataSize);
182   ASSERT_TRUE(rv);
183 
184   UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
185   ASSERT_TRUE(shared_region.IsValid());
186 
187   TestDiscardableSharedMemory memory2(std::move(shared_region));
188   rv = memory2.Map(kDataSize);
189   ASSERT_TRUE(rv);
190 
191   memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
192   memory2.Unlock(0, 0);
193 
194   EXPECT_EQ(memory2.last_known_usage(), Time::FromSecondsSinceUnixEpoch(1));
195 
196   DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
197   EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
198 
199   // This should fail as memory is locked.
200   rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
201   ASSERT_FALSE(rv);
202 
203   // Last usage should have been updated to timestamp passed to Purge above.
204   EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(2));
205 
206   memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
207   memory2.Unlock(0, 0);
208 
209   // Usage time should be correct for |memory2| instance.
210   EXPECT_EQ(memory2.last_known_usage(), Time::FromSecondsSinceUnixEpoch(3));
211 
212   // However, usage time has not changed as far as |memory1| instance knows.
213   EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(2));
214 
215   // Memory is unlocked, but our usage timestamp is incorrect.
216   rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(4));
217   EXPECT_FALSE(rv);
218 
219   // The failed purge attempt should have updated usage time to the correct
220   // value.
221   EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(3));
222 
223   // Purge memory through |memory2| instance. The last usage time should be
224   // set to 0 as a result of this.
225   rv = memory2.Purge(Time::FromSecondsSinceUnixEpoch(5));
226   EXPECT_TRUE(rv);
227   EXPECT_TRUE(memory2.last_known_usage().is_null());
228 
229   // This should fail as memory has already been purged and |memory1|'s usage
230   // time is incorrect as a result.
231   rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(6));
232   EXPECT_FALSE(rv);
233 
234   // The failed purge attempt should have updated usage time to the correct
235   // value.
236   EXPECT_TRUE(memory1.last_known_usage().is_null());
237 
238   // Purge should succeed now that usage time is correct.
239   rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(7));
240   EXPECT_TRUE(rv);
241 }
242 
// Verifies that once a segment has been successfully purged, Lock() on any
// instance sharing that segment reports FAILED.
TEST(DiscardableSharedMemoryTest, LockShouldAlwaysFailAfterSuccessfulPurge) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory2.Unlock(0, 0);

  rv = memory2.Purge(Time::FromSecondsSinceUnixEpoch(2));
  EXPECT_TRUE(rv);

  // Lock should fail as memory has been purged.
  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
}
267 
268 #if BUILDFLAG(IS_ANDROID)
TEST(DiscardableSharedMemoryTest,LockShouldFailIfPlatformLockPagesFails)269 TEST(DiscardableSharedMemoryTest, LockShouldFailIfPlatformLockPagesFails) {
270   const uint32_t kDataSize = 1024;
271 
272   // This test cannot succeed on devices without a proper ashmem device
273   // because Lock() will always succeed.
274   if (!DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting())
275     return;
276 
277   DiscardableSharedMemory memory1;
278   bool rv1 = memory1.CreateAndMap(kDataSize);
279   ASSERT_TRUE(rv1);
280 
281   base::UnsafeSharedMemoryRegion region = memory1.DuplicateRegion();
282   int fd = region.GetPlatformHandle();
283   DiscardableSharedMemory memory2(std::move(region));
284   bool rv2 = memory2.Map(kDataSize);
285   ASSERT_TRUE(rv2);
286 
287   // Unlock() the first page of memory, so we can test Lock()ing it.
288   memory2.Unlock(0, base::GetPageSize());
289   // To cause ashmem_pin_region() to fail, we arrange for it to be called with
290   // an invalid file-descriptor, which requires a valid-looking fd (i.e. we
291   // can't just Close() |memory|), but one on which the operation is invalid.
292   // We can overwrite the |memory| fd with a handle to a different file using
293   // dup2(), which has the nice properties that |memory| still has a valid fd
294   // that it can close, etc without errors, but on which ashmem_pin_region()
295   // will fail.
296   base::ScopedFD null(open("/dev/null", O_RDONLY));
297   ASSERT_EQ(fd, dup2(null.get(), fd));
298 
299   // Now re-Lock()ing the first page should fail.
300   DiscardableSharedMemory::LockResult lock_rv =
301       memory2.Lock(0, base::GetPageSize());
302   EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
303 }
304 #endif  // BUILDFLAG(IS_ANDROID)
305 
// Exercises page-granular Lock()/Unlock() ranges: Purge() must fail until
// every page of the segment has been unlocked, and a failed Purge() corrects
// the caller's stale usage timestamp.
TEST(DiscardableSharedMemoryTest, LockAndUnlockRange) {
  // Number of pages in the segment, not bytes.
  const size_t kDataSize = 32;

  size_t data_size_in_bytes = kDataSize * base::GetPageSize();

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(data_size_in_bytes);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(data_size_in_bytes);
  ASSERT_TRUE(rv);

  // Unlock first page.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory2.Unlock(0, base::GetPageSize());

  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
  EXPECT_FALSE(rv);

  // Lock first page again.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
  DiscardableSharedMemory::LockResult lock_rv =
      memory2.Lock(0, base::GetPageSize());
  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);

  // Unlock first page.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(4));
  memory2.Unlock(0, base::GetPageSize());

  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(5));
  EXPECT_FALSE(rv);

  // Unlock second page.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(6));
  memory2.Unlock(base::GetPageSize(), base::GetPageSize());

  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(7));
  EXPECT_FALSE(rv);

  // Unlock anything onwards (length 0 means "to the end of the segment").
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(8));
  memory2.Unlock(2 * base::GetPageSize(), 0);

  // Memory is unlocked, but our usage timestamp is incorrect.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(9));
  EXPECT_FALSE(rv);

  // The failed purge attempt should have updated usage time to the correct
  // value.
  EXPECT_EQ(Time::FromSecondsSinceUnixEpoch(8), memory1.last_known_usage());

  // Purge should now succeed.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(10));
  EXPECT_TRUE(rv);
}
365 
// Verifies that mapped_size() reflects the mapping and drops to zero after
// Unmap().
TEST(DiscardableSharedMemoryTest, MappedSize) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  EXPECT_LE(kDataSize, memory.mapped_size());

  // Mapped size should be 0 after memory segment has been unmapped.
  rv = memory.Unmap();
  EXPECT_TRUE(rv);
  EXPECT_EQ(0u, memory.mapped_size());
}
380 
// Verifies that Close()ing the handle leaves the mapping intact and that
// Lock()/Unlock() continue to work on the still-mapped segment.
TEST(DiscardableSharedMemoryTest, Close) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  // Mapped size should be unchanged after memory segment has been closed.
  memory.Close();
  EXPECT_LE(kDataSize, memory.mapped_size());

  // Memory is initially locked. Unlock it.
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory.Unlock(0, 0);

  // Lock and unlock memory.
  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory.Unlock(0, 0);
}
402 
// Verifies that a zero-byte segment can be created, locked and unlocked
// without error. Lock() may return PURGED here, so only FAILED is rejected.
TEST(DiscardableSharedMemoryTest, ZeroSize) {
  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(0);
  ASSERT_TRUE(rv);

  // NOTE: trivially true for an unsigned value; kept as a smoke check that
  // mapped_size() is callable on a zero-size segment.
  EXPECT_LE(0u, memory.mapped_size());

  // Memory is initially locked. Unlock it.
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory.Unlock(0, 0);

  // Lock and unlock memory.
  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory.Unlock(0, 0);
}
420 
// This test checks that zero-filled pages are returned after purging a segment
// when DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE is
// defined and MADV_REMOVE is supported.
#if defined(DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE)
TEST(DiscardableSharedMemoryTest, ZeroFilledPagesAfterPurge) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  // Initialize all memory to '0xaa'.
  std::ranges::fill(memory2.memory(), 0xaa);

  // Unlock memory.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory2.Unlock(0, 0);
  EXPECT_FALSE(memory1.IsMemoryLocked());

  // Memory is unlocked, but our usage timestamp is incorrect.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
  EXPECT_FALSE(rv);
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(3));
  EXPECT_TRUE(rv);

  // Check that reading memory after it has been purged is returning
  // zero-filled pages.
  uint8_t expected_data[kDataSize] = {};
  EXPECT_EQ(base::span(expected_data), memory2.memory());
}
#endif
459 
460 #if BUILDFLAG(ENABLE_BASE_TRACING)
TEST(DiscardableSharedMemoryTest,TracingOwnershipEdges)461 TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
462   const uint32_t kDataSize = 1024;
463   TestDiscardableSharedMemory memory1;
464   bool rv = memory1.CreateAndMap(kDataSize);
465   ASSERT_TRUE(rv);
466 
467   base::trace_event::MemoryDumpArgs args = {
468       base::trace_event::MemoryDumpLevelOfDetail::kDetailed};
469   trace_event::ProcessMemoryDump pmd(args);
470   trace_event::MemoryAllocatorDump* client_dump =
471       pmd.CreateAllocatorDump("discardable_manager/map1");
472   const bool is_owned = false;
473   memory1.CreateSharedMemoryOwnershipEdge(client_dump, &pmd, is_owned);
474   const auto* shm_dump = pmd.GetAllocatorDump(
475       SharedMemoryTracker::GetDumpNameForTracing(memory1.mapped_id()));
476   EXPECT_TRUE(shm_dump);
477   EXPECT_EQ(shm_dump->GetSizeInternal(), client_dump->GetSizeInternal());
478   const auto edges = pmd.allocator_dumps_edges();
479   EXPECT_EQ(2u, edges.size());
480   EXPECT_NE(edges.end(), edges.find(shm_dump->guid()));
481   EXPECT_NE(edges.end(), edges.find(client_dump->guid()));
482   // TODO(ssid): test for weak global dump once the
483   // CreateWeakSharedMemoryOwnershipEdge() is fixed, crbug.com/661257.
484 }
485 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
486 
}  // namespace base
488