// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <fcntl.h>
#include <stdint.h>

#include "base/files/scoped_file.h"
#include "base/memory/discardable_shared_memory.h"
#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/process_memory_dump.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

namespace base {

class TestDiscardableSharedMemory : public DiscardableSharedMemory {
 public:
  TestDiscardableSharedMemory() = default;

  explicit TestDiscardableSharedMemory(UnsafeSharedMemoryRegion region)
      : DiscardableSharedMemory(std::move(region)) {}

  void SetNow(Time now) { now_ = now; }

 private:
  // Overridden from DiscardableSharedMemory:
  Time Now() const override { return now_; }

  Time now_;
};

TEST(DiscardableSharedMemoryTest, CreateAndMap) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);
  EXPECT_GE(memory.mapped_size(), kDataSize);
  EXPECT_TRUE(memory.IsMemoryLocked());
}

TEST(DiscardableSharedMemoryTest, CreateFromHandle) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

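  // A second instance created from the duplicated region maps the same
  // segment and starts out locked.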
  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);
  EXPECT_TRUE(memory2.IsMemoryLocked());
}

TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

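  // Note: an offset of 0 and a length of 0 apply Lock()/Unlock() to the
  // entire segment.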
  // Memory is initially locked. Unlock it.
  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory1.Unlock(0, 0);
  EXPECT_FALSE(memory1.IsMemoryLocked());

  // Lock and unlock memory.
  DiscardableSharedMemory::LockResult lock_rv = memory1.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory1.Unlock(0, 0);

  // Lock again before duplicating and passing ownership to new instance.
  lock_rv = memory1.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
  EXPECT_TRUE(memory1.IsMemoryLocked());

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  // Unlock second instance.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
  memory2.Unlock(0, 0);

  // Both memory instances should be unlocked now.
  EXPECT_FALSE(memory2.IsMemoryLocked());
  EXPECT_FALSE(memory1.IsMemoryLocked());

  // Lock second instance before passing ownership back to first instance.
  lock_rv = memory2.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);

  // Memory should still be resident and locked.
  rv = memory1.IsMemoryResident();
  EXPECT_TRUE(rv);
  EXPECT_TRUE(memory1.IsMemoryLocked());

  // Unlock first instance.
  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(4));
  memory1.Unlock(0, 0);
}

TEST(DiscardableSharedMemoryTest, Purge) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  // This should fail as memory is locked.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(1));
  EXPECT_FALSE(rv);

  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory2.Unlock(0, 0);

  ASSERT_TRUE(memory2.IsMemoryResident());

  // Memory is unlocked, but our usage timestamp is incorrect.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(3));
  EXPECT_FALSE(rv);

  ASSERT_TRUE(memory2.IsMemoryResident());

  // Memory is unlocked and our usage timestamp should be correct.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(4));
  EXPECT_TRUE(rv);

  // Lock should fail as memory has been purged.
  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);

  ASSERT_FALSE(memory2.IsMemoryResident());
}

TEST(DiscardableSharedMemoryTest, PurgeAfterClose) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  // Unlock things so we can Purge().
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory.Unlock(0, 0);

  // It should be safe to Purge() |memory| after Close()ing the handle.
  memory.Close();
  rv = memory.Purge(Time::FromSecondsSinceUnixEpoch(4));
  EXPECT_TRUE(rv);
}

TEST(DiscardableSharedMemoryTest, LastUsed) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

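  // Unlocking records the time of last use for the segment.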
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory2.Unlock(0, 0);

  EXPECT_EQ(memory2.last_known_usage(), Time::FromSecondsSinceUnixEpoch(1));

  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);

  // This should fail as memory is locked.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
  ASSERT_FALSE(rv);

  // Last usage should have been updated to timestamp passed to Purge above.
  EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(2));

  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
  memory2.Unlock(0, 0);

  // Usage time should be correct for |memory2| instance.
  EXPECT_EQ(memory2.last_known_usage(), Time::FromSecondsSinceUnixEpoch(3));

  // However, usage time has not changed as far as |memory1| instance knows.
  EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(2));

  // Memory is unlocked, but our usage timestamp is incorrect.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(4));
  EXPECT_FALSE(rv);

  // The failed purge attempt should have updated usage time to the correct
  // value.
  EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(3));

  // Purge memory through |memory2| instance. The last usage time should be
  // set to 0 as a result of this.
  rv = memory2.Purge(Time::FromSecondsSinceUnixEpoch(5));
  EXPECT_TRUE(rv);
  EXPECT_TRUE(memory2.last_known_usage().is_null());

  // This should fail as memory has already been purged and |memory1|'s usage
  // time is incorrect as a result.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(6));
  EXPECT_FALSE(rv);

  // The failed purge attempt should have updated usage time to the correct
  // value.
  EXPECT_TRUE(memory1.last_known_usage().is_null());

  // Purge should succeed now that usage time is correct.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(7));
  EXPECT_TRUE(rv);
}

TEST(DiscardableSharedMemoryTest, LockShouldAlwaysFailAfterSuccessfulPurge) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory2.Unlock(0, 0);

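  // Purge through the same instance that performed the unlock; its last
  // usage time is up to date, so this should succeed.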
  rv = memory2.Purge(Time::FromSecondsSinceUnixEpoch(2));
  EXPECT_TRUE(rv);

  // Lock should fail as memory has been purged.
  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
}

#if BUILDFLAG(IS_ANDROID)
TEST(DiscardableSharedMemoryTest, LockShouldFailIfPlatformLockPagesFails) {
  const uint32_t kDataSize = 1024;

  // This test cannot succeed on devices without a proper ashmem device
  // because Lock() will always succeed.
  if (!DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting())
    return;

  DiscardableSharedMemory memory1;
  bool rv1 = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv1);

  base::UnsafeSharedMemoryRegion region = memory1.DuplicateRegion();
  int fd = region.GetPlatformHandle();
  DiscardableSharedMemory memory2(std::move(region));
  bool rv2 = memory2.Map(kDataSize);
  ASSERT_TRUE(rv2);

  // Unlock() the first page of memory, so we can test Lock()ing it.
  memory2.Unlock(0, base::GetPageSize());
  // To cause ashmem_pin_region() to fail, we arrange for it to be called with
  // an invalid file-descriptor, which requires a valid-looking fd (i.e. we
  // can't just Close() |memory|), but one on which the operation is invalid.
  // We can overwrite the |memory| fd with a handle to a different file using
  // dup2(), which has the nice properties that |memory| still has a valid fd
  // that it can close, etc without errors, but on which ashmem_pin_region()
  // will fail.
  base::ScopedFD null(open("/dev/null", O_RDONLY));
  ASSERT_EQ(fd, dup2(null.get(), fd));

  // Now re-Lock()ing the first page should fail.
  DiscardableSharedMemory::LockResult lock_rv =
      memory2.Lock(0, base::GetPageSize());
  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
}
#endif  // BUILDFLAG(IS_ANDROID)

TEST(DiscardableSharedMemoryTest, LockAndUnlockRange) {
  const size_t kDataSize = 32;

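  // kDataSize is a page count here; map multiple pages so that individual
  // page ranges can be locked and unlocked independently.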
  size_t data_size_in_bytes = kDataSize * base::GetPageSize();

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(data_size_in_bytes);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(data_size_in_bytes);
  ASSERT_TRUE(rv);

  // Unlock first page.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory2.Unlock(0, base::GetPageSize());

  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
  EXPECT_FALSE(rv);

  // Lock first page again.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
  DiscardableSharedMemory::LockResult lock_rv =
      memory2.Lock(0, base::GetPageSize());
  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);

  // Unlock first page.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(4));
  memory2.Unlock(0, base::GetPageSize());

  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(5));
  EXPECT_FALSE(rv);

  // Unlock second page.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(6));
  memory2.Unlock(base::GetPageSize(), base::GetPageSize());

  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(7));
  EXPECT_FALSE(rv);

  // Unlock everything onwards (a length of 0 means to the end of the segment).
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(8));
  memory2.Unlock(2 * base::GetPageSize(), 0);

  // Memory is unlocked, but our usage timestamp is incorrect.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(9));
  EXPECT_FALSE(rv);

  // The failed purge attempt should have updated usage time to the correct
  // value.
  EXPECT_EQ(Time::FromSecondsSinceUnixEpoch(8), memory1.last_known_usage());

  // Purge should now succeed.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(10));
  EXPECT_TRUE(rv);
}

TEST(DiscardableSharedMemoryTest, MappedSize) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  EXPECT_LE(kDataSize, memory.mapped_size());

  // Mapped size should be 0 after memory segment has been unmapped.
  rv = memory.Unmap();
  EXPECT_TRUE(rv);
  EXPECT_EQ(0u, memory.mapped_size());
}

TEST(DiscardableSharedMemoryTest, Close) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  // Mapped size should be unchanged after memory segment has been closed.
  memory.Close();
  EXPECT_LE(kDataSize, memory.mapped_size());

  // Memory is initially locked. Unlock it.
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory.Unlock(0, 0);

  // Lock and unlock memory.
  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory.Unlock(0, 0);
}

TEST(DiscardableSharedMemoryTest, ZeroSize) {
  TestDiscardableSharedMemory memory;
  bool rv = memory.CreateAndMap(0);
  ASSERT_TRUE(rv);

  EXPECT_LE(0u, memory.mapped_size());

  // Memory is initially locked. Unlock it.
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory.Unlock(0, 0);

  // Lock and unlock memory.
  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
  memory.Unlock(0, 0);
}

// This test checks that zero-filled pages are returned after purging a segment
// when DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE is
// defined and MADV_REMOVE is supported.
#if defined(DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE)
TEST(DiscardableSharedMemoryTest, ZeroFilledPagesAfterPurge) {
  const uint32_t kDataSize = 1024;

  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
  ASSERT_TRUE(shared_region.IsValid());

  TestDiscardableSharedMemory memory2(std::move(shared_region));
  rv = memory2.Map(kDataSize);
  ASSERT_TRUE(rv);

  // Initialize all memory to '0xaa'.
  memset(memory2.memory(), 0xaa, kDataSize);

  // Unlock memory.
  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
  memory2.Unlock(0, 0);
  EXPECT_FALSE(memory1.IsMemoryLocked());

  // Memory is unlocked, but our usage timestamp is incorrect.
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
  EXPECT_FALSE(rv);
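  // The failed attempt updated |memory1|'s last-usage time, so this purge
  // should succeed.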
  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(3));
  EXPECT_TRUE(rv);

  // Check that reading memory after it has been purged returns zero-filled
  // pages.
  uint8_t expected_data[kDataSize] = {};
  EXPECT_EQ(memcmp(memory2.memory(), expected_data, kDataSize), 0);
}
#endif

#if BUILDFLAG(ENABLE_BASE_TRACING)
TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
  const uint32_t kDataSize = 1024;
  TestDiscardableSharedMemory memory1;
  bool rv = memory1.CreateAndMap(kDataSize);
  ASSERT_TRUE(rv);

  base::trace_event::MemoryDumpArgs args = {
      base::trace_event::MemoryDumpLevelOfDetail::kDetailed};
  trace_event::ProcessMemoryDump pmd(args);
  trace_event::MemoryAllocatorDump* client_dump =
      pmd.CreateAllocatorDump("discardable_manager/map1");
  const bool is_owned = false;
  memory1.CreateSharedMemoryOwnershipEdge(client_dump, &pmd, is_owned);
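  // CreateSharedMemoryOwnershipEdge() should have added a dump for the
  // underlying shared memory segment, sized like the client dump, plus
  // ownership edges for both dumps.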
  const auto* shm_dump = pmd.GetAllocatorDump(
      SharedMemoryTracker::GetDumpNameForTracing(memory1.mapped_id()));
  EXPECT_TRUE(shm_dump);
  EXPECT_EQ(shm_dump->GetSizeInternal(), client_dump->GetSizeInternal());
  const auto edges = pmd.allocator_dumps_edges();
  EXPECT_EQ(2u, edges.size());
  EXPECT_NE(edges.end(), edges.find(shm_dump->guid()));
  EXPECT_NE(edges.end(), edges.find(client_dump->guid()));
  // TODO(ssid): test for weak global dump once the
  // CreateWeakSharedMemoryOwnershipEdge() is fixed, crbug.com/661257.
}
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

}  // namespace base