1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/shared_memory.h"
6
7 #include <stddef.h>
8 #include <stdint.h>
9
10 #include <memory>
11
12 #include "base/atomicops.h"
13 #include "base/base_switches.h"
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/logging.h"
17 #include "base/macros.h"
18 #include "base/memory/shared_memory_handle.h"
19 #include "base/process/kill.h"
20 #include "base/rand_util.h"
21 #include "base/strings/string_number_conversions.h"
22 #include "base/strings/string_piece.h"
23 #include "base/strings/string_util.h"
24 #include "base/sys_info.h"
25 #include "base/test/multiprocess_test.h"
26 #include "base/threading/platform_thread.h"
27 #include "base/time/time.h"
28 #include "base/unguessable_token.h"
29 #include "build/build_config.h"
30 #include "testing/gtest/include/gtest/gtest.h"
31 #include "testing/multiprocess_func_list.h"
32
33 #if defined(OS_ANDROID)
34 #include "base/callback.h"
35 #endif
36
37 #if defined(OS_POSIX)
38 #include <errno.h>
39 #include <fcntl.h>
40 #include <sys/mman.h>
41 #include <sys/stat.h>
42 #include <sys/types.h>
43 #include <unistd.h>
44 #endif
45
46 #if defined(OS_LINUX)
47 #include <sys/syscall.h>
48 #endif
49
50 #if defined(OS_WIN)
51 #include "base/win/scoped_handle.h"
52 #endif
53
54 #if defined(OS_FUCHSIA)
55 #include <lib/zx/vmar.h>
56 #include <lib/zx/vmo.h>
57 #endif
58
59 namespace base {
60
61 namespace {
62
63 #if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
64 // Each thread will open the shared memory. Each thread will take a different 4
65 // byte int pointer, and keep changing it, with some small pauses in between.
66 // Verify that each thread's value in the shared memory is always correct.
67 class MultipleThreadMain : public PlatformThread::Delegate {
68 public:
MultipleThreadMain(int16_t id)69 explicit MultipleThreadMain(int16_t id) : id_(id) {}
70 ~MultipleThreadMain() override = default;
71
CleanUp()72 static void CleanUp() {
73 SharedMemory memory;
74 memory.Delete(s_test_name_);
75 }
76
77 // PlatformThread::Delegate interface.
ThreadMain()78 void ThreadMain() override {
79 const uint32_t kDataSize = 1024;
80 SharedMemory memory;
81 bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
82 EXPECT_TRUE(rv);
83 rv = memory.Map(kDataSize);
84 EXPECT_TRUE(rv);
85 int* ptr = static_cast<int*>(memory.memory()) + id_;
86 EXPECT_EQ(0, *ptr);
87
88 for (int idx = 0; idx < 100; idx++) {
89 *ptr = idx;
90 PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
91 EXPECT_EQ(*ptr, idx);
92 }
93 // Reset back to 0 for the next test that uses the same name.
94 *ptr = 0;
95
96 memory.Close();
97 }
98
99 private:
100 int16_t id_;
101
102 static const char s_test_name_[];
103
104 DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
105 };
106
107 const char MultipleThreadMain::s_test_name_[] =
108 "SharedMemoryOpenThreadTest";
109 #endif // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
110
// Parameterizes the suite so it can run both in the default configuration
// and (on desktop Linux) with /dev/shm usage disabled.
enum class Mode {
  Default,
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
  DisableDevShm = 1,
#endif
};

class SharedMemoryTest : public ::testing::TestWithParam<Mode> {
 public:
  void SetUp() override {
    switch (GetParam()) {
      case Mode::Default:
        break;
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
      case Mode::DisableDevShm:
        // Force the POSIX implementation to fall back from /dev/shm to its
        // alternate backing location for the remainder of the process.
        CommandLine* cmdline = CommandLine::ForCurrentProcess();
        cmdline->AppendSwitch(switches::kDisableDevShmUsage);
        break;
#endif  // defined(OS_LINUX) && !defined(OS_CHROMEOS)
    }
  }
};
133
134 } // namespace
135
136 // Android/Mac/Fuchsia doesn't support SharedMemory::Open/Delete/
137 // CreateNamedDeprecated(openExisting=true)
138 #if !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
139
TEST_P(SharedMemoryTest, OpenClose) {
  const uint32_t kDataSize = 1024;
  std::string test_name = "SharedMemoryOpenCloseTest";

  // Open two handles to a memory segment, confirm that they are mapped
  // separately yet point to the same space.
  SharedMemory memory1;
  // Delete the segment (twice) so a leftover from a previous run can't
  // interfere; Delete reports success even when nothing existed.
  EXPECT_TRUE(memory1.Delete(test_name));
  EXPECT_TRUE(memory1.Delete(test_name));
  // Opening a segment that does not exist must fail.
  EXPECT_FALSE(memory1.Open(test_name, false));
  EXPECT_TRUE(memory1.CreateNamedDeprecated(test_name, false, kDataSize));
  EXPECT_TRUE(memory1.Map(kDataSize));

  SharedMemory memory2;
  EXPECT_TRUE(memory2.Open(test_name, false));
  EXPECT_TRUE(memory2.Map(kDataSize));
  // Two distinct mappings of the same underlying segment.
  EXPECT_NE(memory1.memory(), memory2.memory());

  // Make sure we don't segfault. (it actually happened!)
  ASSERT_NE(memory1.memory(), static_cast<void*>(nullptr));
  ASSERT_NE(memory2.memory(), static_cast<void*>(nullptr));

  // Write data through the first mapping, verify it through the second.
  memset(memory1.memory(), '1', kDataSize);
  EXPECT_EQ(0, memcmp(memory1.memory(), memory2.memory(), kDataSize));

  // Close the first memory segment, and verify the second has the right data.
  memory1.Close();
  const char* region = static_cast<const char*>(memory2.memory());
  for (size_t i = 0; i < kDataSize; ++i)
    EXPECT_EQ('1', region[i]);

  // Close the second memory segment.
  memory2.Close();

  EXPECT_TRUE(memory1.Delete(test_name));
  EXPECT_TRUE(memory2.Delete(test_name));
}
187
TEST_P(SharedMemoryTest, OpenExclusive) {
  const uint32_t kDataSize = 1024;
  const uint32_t kDataSize2 = 2048;
  // Append the current time to the name so concurrent or leftover runs
  // cannot collide on the same segment.
  std::ostringstream test_name_stream;
  test_name_stream << "SharedMemoryOpenExclusiveTest."
                   << Time::Now().ToDoubleT();
  std::string test_name = test_name_stream.str();

  // Open two handles to a memory segment and check that
  // open_existing_deprecated works as expected.
  SharedMemory memory1;
  bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
  EXPECT_TRUE(rv);

  // Memory1 knows it's size because it created it.
  EXPECT_EQ(memory1.requested_size(), kDataSize);

  rv = memory1.Map(kDataSize);
  EXPECT_TRUE(rv);

  // The mapped memory1 must be at least the size we asked for.
  EXPECT_GE(memory1.mapped_size(), kDataSize);

  // The mapped memory1 shouldn't exceed rounding for allocation granularity.
  EXPECT_LT(memory1.mapped_size(),
            kDataSize + SysInfo::VMAllocationGranularity());

  // Fill the segment so we can later verify it was not truncated.
  memset(memory1.memory(), 'G', kDataSize);

  SharedMemory memory2;
  // Should not be able to create if openExisting is false.
  rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
  EXPECT_FALSE(rv);

  // Should be able to create with openExisting true.
  rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
  EXPECT_TRUE(rv);

  // Memory2 shouldn't know the size because we didn't create it.
  EXPECT_EQ(memory2.requested_size(), 0U);

  // We should be able to map the original size.
  rv = memory2.Map(kDataSize);
  EXPECT_TRUE(rv);

  // The mapped memory2 must be at least the size of the original.
  EXPECT_GE(memory2.mapped_size(), kDataSize);

  // The mapped memory2 shouldn't exceed rounding for allocation granularity.
  EXPECT_LT(memory2.mapped_size(),
            kDataSize2 + SysInfo::VMAllocationGranularity());

  // Verify that opening memory2 didn't truncate or delete memory 1.
  char* start_ptr = static_cast<char*>(memory2.memory());
  char* end_ptr = start_ptr + kDataSize;
  for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
    EXPECT_EQ(*ptr, 'G');
  }

  memory1.Close();
  memory2.Close();

  rv = memory1.Delete(test_name);
  EXPECT_TRUE(rv);
}
253 #endif // !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
254
255 // Check that memory is still mapped after its closed.
TEST_P(SharedMemoryTest, CloseNoUnmap) {
  const size_t kDataSize = 4096;

  SharedMemory memory;
  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
  char* ptr = static_cast<char*>(memory.memory());
  ASSERT_NE(ptr, static_cast<void*>(nullptr));
  memset(ptr, 'G', kDataSize);

  memory.Close();

  // Close() drops the handle but must leave the mapping in place.
  EXPECT_EQ(ptr, memory.memory());
  // Idiomatic EXPECT_FALSE instead of EXPECT_TRUE(!...) for a clearer
  // failure message.
  EXPECT_FALSE(memory.handle().IsValid());

  // The mapped contents must survive the Close().
  for (size_t i = 0; i < kDataSize; i++) {
    EXPECT_EQ('G', ptr[i]);
  }

  // Only Unmap() actually tears the mapping down.
  memory.Unmap();
  EXPECT_EQ(nullptr, memory.memory());
}
277
278 #if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
279 // Create a set of N threads to each open a shared memory segment and write to
280 // it. Verify that they are always reading/writing consistent data.
TEST_P(SharedMemoryTest, MultipleThreads) {
  const int kNumThreads = 5;

  MultipleThreadMain::CleanUp();
  // On POSIX we have a problem when 2 threads try to create the shmem
  // (a file) at exactly the same time, since create both creates the
  // file and zerofills it. We solve the problem for this unit test
  // (make it not flaky) by starting with 1 thread, then
  // intentionally don't clean up its shmem before running with
  // kNumThreads.

  int threadcounts[] = { 1, kNumThreads };
  for (size_t i = 0; i < arraysize(threadcounts); i++) {
    int numthreads = threadcounts[i];
    std::unique_ptr<PlatformThreadHandle[]> thread_handles;
    std::unique_ptr<MultipleThreadMain* []> thread_delegates;

    thread_handles.reset(new PlatformThreadHandle[numthreads]);
    thread_delegates.reset(new MultipleThreadMain*[numthreads]);

    // Spawn the threads. Each delegate gets a distinct id, and therefore a
    // distinct int slot inside the shared segment.
    for (int16_t index = 0; index < numthreads; index++) {
      PlatformThreadHandle pth;
      thread_delegates[index] = new MultipleThreadMain(index);
      EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
      thread_handles[index] = pth;
    }

    // Wait for the threads to finish.
    for (int index = 0; index < numthreads; index++) {
      PlatformThread::Join(thread_handles[index]);
      delete thread_delegates[index];
    }
  }
  MultipleThreadMain::CleanUp();
}
317 #endif
318
319 // Allocate private (unique) shared memory with an empty string for a
320 // name. Make sure several of them don't point to the same thing as
321 // we might expect if the names are equal.
TEST_P(SharedMemoryTest, AnonymousPrivate) {
  const int count = 4;
  const uint32_t kDataSize = 8192;

  std::unique_ptr<SharedMemory[]> memories(new SharedMemory[count]);
  std::unique_ptr<int* []> pointers(new int*[count]);
  ASSERT_TRUE(memories.get());
  ASSERT_TRUE(pointers.get());

  // Create and map each anonymous segment, remembering its mapped address.
  // (Loop indices are scoped to their loops; the original declared i/j at
  // function scope and then shadowed i in the final loop.)
  for (int i = 0; i < count; i++) {
    EXPECT_TRUE(memories[i].CreateAndMapAnonymous(kDataSize));
    int* ptr = static_cast<int*>(memories[i].memory());
    EXPECT_TRUE(ptr);
    pointers[i] = ptr;
  }

  for (int i = 0; i < count; i++) {
    // Zero out the first int in each segment except segment |i|, which
    // gets 100.
    for (int j = 0; j < count; j++) {
      pointers[j][0] = (i == j) ? 100 : 0;
    }
    // Make sure there is no bleeding of the 100 into the other segments —
    // i.e. the anonymous segments really are distinct.
    for (int j = 0; j < count; j++) {
      if (i == j)
        EXPECT_EQ(100, pointers[j][0]);
      else
        EXPECT_EQ(0, pointers[j][0]);
    }
  }

  for (int i = 0; i < count; i++) {
    memories[i].Close();
  }
}
362
TEST_P(SharedMemoryTest, GetReadOnlyHandle) {
  StringPiece contents = "Hello World";

  SharedMemory writable_shmem;
  SharedMemoryCreateOptions options;
  options.size = contents.size();
  // Ask for a segment that supports read-only duplication.
  options.share_read_only = true;
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
  options.type = SharedMemoryHandle::POSIX;
#endif
  ASSERT_TRUE(writable_shmem.Create(options));
  ASSERT_TRUE(writable_shmem.Map(options.size));
  memcpy(writable_shmem.memory(), contents.data(), contents.size());
  EXPECT_TRUE(writable_shmem.Unmap());

  // The read-only handle keeps the segment's identity (GUID) and size.
  SharedMemoryHandle readonly_handle = writable_shmem.GetReadOnlyHandle();
  EXPECT_EQ(writable_shmem.handle().GetGUID(), readonly_handle.GetGUID());
  EXPECT_EQ(writable_shmem.handle().GetSize(), readonly_handle.GetSize());
  ASSERT_TRUE(readonly_handle.IsValid());
  SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);

  // A read-only mapping must see the data written through the writable one.
  ASSERT_TRUE(readonly_shmem.Map(contents.size()));
  EXPECT_EQ(contents,
            StringPiece(static_cast<const char*>(readonly_shmem.memory()),
                        contents.size()));
  EXPECT_TRUE(readonly_shmem.Unmap());

#if defined(OS_ANDROID)
  // On Android, mapping a region through a read-only descriptor makes the
  // region read-only. Any writable mapping attempt should fail.
  ASSERT_FALSE(writable_shmem.Map(contents.size()));
#else
  // Make sure the writable instance is still writable.
  ASSERT_TRUE(writable_shmem.Map(contents.size()));
  StringPiece new_contents = "Goodbye";
  memcpy(writable_shmem.memory(), new_contents.data(), new_contents.size());
  EXPECT_EQ(new_contents,
            StringPiece(static_cast<const char*>(writable_shmem.memory()),
                        new_contents.size()));
#endif

  // We'd like to check that if we send the read-only segment to another
  // process, then that other process can't reopen it read/write. (Since that
  // would be a security hole.) Setting up multiple processes is hard in a
  // unittest, so this test checks that the *current* process can't reopen the
  // segment read/write. I think the test here is stronger than we actually
  // care about, but there's a remote possibility that sending a file over a
  // pipe would transform it into read/write.
  SharedMemoryHandle handle = readonly_shmem.handle();

#if defined(OS_ANDROID)
  // The "read-only" handle is still writable on Android:
  // http://crbug.com/320865
  (void)handle;
#elif defined(OS_FUCHSIA)
  // A read-only VMO must refuse writable mappings and writable duplicates,
  // while still allowing readable duplicates.
  uintptr_t addr;
  EXPECT_NE(ZX_OK, zx::vmar::root_self()->map(
                       0, *zx::unowned_vmo(handle.GetHandle()), 0,
                       contents.size(), ZX_VM_FLAG_PERM_WRITE, &addr))
      << "Shouldn't be able to map as writable.";

  zx::vmo duped_handle;
  EXPECT_NE(ZX_OK, zx::unowned_vmo(handle.GetHandle())
                       ->duplicate(ZX_RIGHT_WRITE, &duped_handle))
      << "Shouldn't be able to duplicate the handle into a writable one.";

  EXPECT_EQ(ZX_OK, zx::unowned_vmo(handle.GetHandle())
                       ->duplicate(ZX_RIGHT_READ, &duped_handle))
      << "Should be able to duplicate the handle into a readable one.";
#elif defined(OS_POSIX)
  int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
  EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
      << "The descriptor itself should be read-only.";

  errno = 0;
  void* writable = mmap(nullptr, contents.size(), PROT_READ | PROT_WRITE,
                        MAP_SHARED, handle_fd, 0);
  int mmap_errno = errno;
  EXPECT_EQ(MAP_FAILED, writable)
      << "It shouldn't be possible to re-mmap the descriptor writable.";
  EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
  // Clean up in case the writable mapping unexpectedly succeeded.
  if (writable != MAP_FAILED)
    EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));

#elif defined(OS_WIN)
  EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
      << "Shouldn't be able to map memory writable.";

  HANDLE temp_handle;
  BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
                              GetCurrentProcess(), &temp_handle,
                              FILE_MAP_ALL_ACCESS, false, 0);
  EXPECT_EQ(FALSE, rv)
      << "Shouldn't be able to duplicate the handle into a writable one.";
  // If duplication unexpectedly succeeded, ScopedHandle closes it right away.
  if (rv)
    win::ScopedHandle writable_handle(temp_handle);
  rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
                         GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
                         false, 0);
  EXPECT_EQ(TRUE, rv)
      << "Should be able to duplicate the handle into a readable one.";
  if (rv)
    win::ScopedHandle writable_handle(temp_handle);
#else
#error Unexpected platform; write a test that tries to make 'handle' writable.
#endif  // defined(OS_POSIX) || defined(OS_WIN)
}
471
TEST_P(SharedMemoryTest, ShareToSelf) {
  StringPiece contents = "Hello World";

  SharedMemory shmem;
  ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
  memcpy(shmem.memory(), contents.data(), contents.size());
  EXPECT_TRUE(shmem.Unmap());

  // First duplicate: map writable and verify the contents round-trip, and
  // that the duplicate keeps the original's identity and size.
  SharedMemoryHandle writable_handle = shmem.handle().Duplicate();
  ASSERT_TRUE(writable_handle.IsValid());
  EXPECT_TRUE(writable_handle.OwnershipPassesToIPC());
  EXPECT_EQ(shmem.handle().GetGUID(), writable_handle.GetGUID());
  EXPECT_EQ(shmem.handle().GetSize(), writable_handle.GetSize());
  SharedMemory shared(writable_handle, /*readonly=*/false);

  ASSERT_TRUE(shared.Map(contents.size()));
  EXPECT_EQ(
      contents,
      StringPiece(static_cast<const char*>(shared.memory()), contents.size()));

  // Second duplicate: map read-only and verify again.
  SharedMemoryHandle ro_handle = shmem.handle().Duplicate();
  ASSERT_TRUE(ro_handle.IsValid());
  ASSERT_TRUE(ro_handle.OwnershipPassesToIPC());
  SharedMemory readonly(ro_handle, /*readonly=*/true);

  ASSERT_TRUE(readonly.Map(contents.size()));
  EXPECT_EQ(contents,
            StringPiece(static_cast<const char*>(readonly.memory()),
                        contents.size()));
}
502
TEST_P(SharedMemoryTest, ShareWithMultipleInstances) {
  static const StringPiece kContents = "Hello World";

  SharedMemory shmem;
  ASSERT_TRUE(shmem.CreateAndMapAnonymous(kContents.size()));
  // We do not need to unmap |shmem| to let |shared| map.
  // NOTE: the three StringPieces below are live views over the three
  // mappings — writes through any mapping are observed through all of them.
  const StringPiece shmem_contents(static_cast<const char*>(shmem.memory()),
                                   shmem.requested_size());

  SharedMemoryHandle shared_handle = shmem.handle().Duplicate();
  ASSERT_TRUE(shared_handle.IsValid());
  SharedMemory shared(shared_handle, /*readonly=*/false);
  ASSERT_TRUE(shared.Map(kContents.size()));
  // The underlying shared memory is created by |shmem|, so both
  // |shared|.requested_size() and |readonly|.requested_size() are zero.
  ASSERT_EQ(0U, shared.requested_size());
  const StringPiece shared_contents(static_cast<const char*>(shared.memory()),
                                    shmem.requested_size());

  shared_handle = shmem.handle().Duplicate();
  ASSERT_TRUE(shared_handle.IsValid());
  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
  SharedMemory readonly(shared_handle, /*readonly=*/true);
  ASSERT_TRUE(readonly.Map(kContents.size()));
  ASSERT_EQ(0U, readonly.requested_size());
  const StringPiece readonly_contents(
      static_cast<const char*>(readonly.memory()),
      shmem.requested_size());

  // |shmem| should be able to update the content.
  memcpy(shmem.memory(), kContents.data(), kContents.size());

  // All three views observe the write made through |shmem|.
  ASSERT_EQ(kContents, shmem_contents);
  ASSERT_EQ(kContents, shared_contents);
  ASSERT_EQ(kContents, readonly_contents);

  // |shared| should also be able to update the content.
  memcpy(shared.memory(), ToLowerASCII(kContents).c_str(), kContents.size());

  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shmem_contents);
  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shared_contents);
  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), readonly_contents);
}
546
TEST_P(SharedMemoryTest, MapAt) {
  // MapAt() offsets must be multiples of the allocation granularity, and the
  // test needs at least one uint32_t per granularity unit. ASSERT_GE gives a
  // useful failure message (the original ASSERT_TRUE(a >= b) would not).
  ASSERT_GE(SysInfo::VMAllocationGranularity(), sizeof(uint32_t));
  const size_t kCount = SysInfo::VMAllocationGranularity();
  const size_t kDataSize = kCount * sizeof(uint32_t);

  SharedMemory memory;
  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
  uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
  ASSERT_NE(ptr, static_cast<void*>(nullptr));

  // Fill every slot with its own index.
  for (size_t i = 0; i < kCount; ++i) {
    ptr[i] = i;
  }

  memory.Unmap();

  // Re-map starting one granularity unit into the segment; the remaining
  // data should line up with the original indices.
  off_t offset = SysInfo::VMAllocationGranularity();
  ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
  offset /= sizeof(uint32_t);
  ptr = static_cast<uint32_t*>(memory.memory());
  ASSERT_NE(ptr, static_cast<void*>(nullptr));
  for (size_t i = offset; i < kCount; ++i) {
    EXPECT_EQ(ptr[i - offset], i);
  }
}
572
TEST_P(SharedMemoryTest, MapTwice) {
  const uint32_t kDataSize = 1024;
  SharedMemory memory;
  EXPECT_TRUE(memory.CreateAndMapAnonymous(kDataSize));

  void* mapped_at = memory.memory();

  // A second Map() must fail and leave the original mapping untouched.
  EXPECT_FALSE(memory.Map(kDataSize));
  EXPECT_EQ(mapped_at, memory.memory());
}
585
586 #if defined(OS_POSIX)
587 // This test is not applicable for iOS (crbug.com/399384).
588 #if !defined(OS_IOS)
589 // Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
TEST_P(SharedMemoryTest, AnonymousExecutable) {
#if defined(OS_LINUX)
  // On Chromecast both /dev/shm and /tmp are mounted with 'noexec' option,
  // which makes this test fail. But Chromecast doesn't use NaCL so we don't
  // need this.
  if (!IsPathExecutable(FilePath("/dev/shm")) &&
      !IsPathExecutable(FilePath("/tmp"))) {
    return;
  }
#endif  // OS_LINUX
  const uint32_t kTestSize = 1 << 16;

  SharedMemory shared_memory;
  SharedMemoryCreateOptions options;
  options.size = kTestSize;
  // Request a segment whose pages may later be made executable.
  options.executable = true;
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
  options.type = SharedMemoryHandle::POSIX;
#endif

  EXPECT_TRUE(shared_memory.Create(options));
  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));

  // Adding PROT_EXEC via mprotect() must succeed for an executable segment.
  EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
                        PROT_READ | PROT_EXEC));
}
617 #endif // !defined(OS_IOS)
618
619 #if defined(OS_ANDROID)
620 // This test is restricted to Android since there is no way on other platforms
621 // to guarantee that a region can never be mapped with PROT_EXEC. E.g. on
622 // Linux, anonymous shared regions come from /dev/shm which can be mounted
623 // without 'noexec'. In this case, anything can perform an mprotect() to
624 // change the protection mask of a given page.
TEST(SharedMemoryTest, AnonymousIsNotExecutableByDefault) {
  const uint32_t kTestSize = 1 << 16;

  SharedMemoryCreateOptions options;
  options.size = kTestSize;
  SharedMemory shared_memory;
  EXPECT_TRUE(shared_memory.Create(options));
  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));

  // Attempting to add PROT_EXEC must be rejected with EACCES.
  errno = 0;
  const int result =
      mprotect(shared_memory.memory(), shared_memory.requested_size(),
               PROT_READ | PROT_EXEC);
  EXPECT_EQ(-1, result);
  EXPECT_EQ(EACCES, errno);
}
640 #endif // OS_ANDROID
641
642 // Android supports a different permission model than POSIX for its "ashmem"
643 // shared memory implementation. So the tests about file permissions are not
644 // included on Android. Fuchsia does not use a file-backed shared memory
645 // implementation.
646
647 #if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
648
649 // Set a umask and restore the old mask on destruction.
650 class ScopedUmaskSetter {
651 public:
ScopedUmaskSetter(mode_t target_mask)652 explicit ScopedUmaskSetter(mode_t target_mask) {
653 old_umask_ = umask(target_mask);
654 }
~ScopedUmaskSetter()655 ~ScopedUmaskSetter() { umask(old_umask_); }
656 private:
657 mode_t old_umask_;
658 DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
659 };
660
661 // Create a shared memory object, check its permissions.
TEST_P(SharedMemoryTest, FilePermissionsAnonymous) {
  const uint32_t kTestSize = 1 << 8;

  SharedMemory shared_memory;
  SharedMemoryCreateOptions options;
  options.size = kTestSize;
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
  options.type = SharedMemoryHandle::POSIX;
#endif
  // Set a file mode creation mask that gives all permissions.
  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);

  EXPECT_TRUE(shared_memory.Create(options));

  int shm_fd =
      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
  struct stat shm_stat;
  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
  // Neither the group, nor others should be able to read the shared memory
  // file — even though the umask above would have allowed it.
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
}
686
687 // Create a shared memory object, check its permissions.
TEST_P(SharedMemoryTest, FilePermissionsNamed) {
  const uint32_t kTestSize = 1 << 8;

  SharedMemory shared_memory;
  SharedMemoryCreateOptions options;
  options.size = kTestSize;
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
  options.type = SharedMemoryHandle::POSIX;
#endif

  // Set a file mode creation mask that gives all permissions.
  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);

  EXPECT_TRUE(shared_memory.Create(options));

  // Even while its name existed, the backing file must not have been
  // accessible to the group or to others.
  const int shm_fd =
      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
  struct stat shm_stat;
  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
}
712 #endif // !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
713
714 #endif // defined(OS_POSIX)
715
716 // Map() will return addresses which are aligned to the platform page size, this
717 // varies from platform to platform though. Since we'd like to advertise a
718 // minimum alignment that callers can count on, test for it here.
TEST_P(SharedMemoryTest, MapMinimumAlignment) {
  static const int kDataSize = 8192;

  SharedMemory shared_memory;
  ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
  const uintptr_t address =
      reinterpret_cast<uintptr_t>(shared_memory.memory());
  // All bits below MAP_MINIMUM_ALIGNMENT must be zero.
  EXPECT_EQ(0U, address & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
  shared_memory.Close();
}
728
729 #if defined(OS_WIN)
TEST_P(SharedMemoryTest, UnsafeImageSection) {
  const char kTestSectionName[] = "UnsafeImageSection";
  wchar_t path[MAX_PATH];
  EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);

  // Map the current executable image to save us creating a new PE file on
  // disk.
  base::win::ScopedHandle file_handle(::CreateFile(
      path, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr));
  EXPECT_TRUE(file_handle.IsValid());
  // SEC_IMAGE sections are "unsafe": SharedMemory must refuse to map them.
  base::win::ScopedHandle section_handle(
      ::CreateFileMappingA(file_handle.Get(), nullptr,
                           PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
  EXPECT_TRUE(section_handle.IsValid());

  // Check direct opening by name, from handle and duplicated from handle.
  SharedMemory shared_memory_open;
  EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
  EXPECT_FALSE(shared_memory_open.Map(1));
  EXPECT_EQ(nullptr, shared_memory_open.memory());

  // Wrapping the raw image-section handle must also fail to map.
  SharedMemory shared_memory_handle_local(
      SharedMemoryHandle(section_handle.Take(), 1, UnguessableToken::Create()),
      true);
  EXPECT_FALSE(shared_memory_handle_local.Map(1));
  EXPECT_EQ(nullptr, shared_memory_handle_local.memory());

  // Check that a handle without SECTION_QUERY also can't be mapped as it can't
  // be checked.
  SharedMemory shared_memory_handle_dummy;
  SharedMemoryCreateOptions options;
  options.size = 0x1000;
  EXPECT_TRUE(shared_memory_handle_dummy.Create(options));
  HANDLE handle_no_query;
  EXPECT_TRUE(::DuplicateHandle(
      ::GetCurrentProcess(), shared_memory_handle_dummy.handle().GetHandle(),
      ::GetCurrentProcess(), &handle_no_query, FILE_MAP_READ, FALSE, 0));
  SharedMemory shared_memory_handle_no_query(
      SharedMemoryHandle(handle_no_query, options.size,
                         UnguessableToken::Create()),
      true);
  EXPECT_FALSE(shared_memory_handle_no_query.Map(1));
  EXPECT_EQ(nullptr, shared_memory_handle_no_query.memory());
}
773 #endif // defined(OS_WIN)
774
775 // iOS does not allow multiple processes.
776 // Android ashmem does not support named shared memory.
777 // Fuchsia SharedMemory does not support named shared memory.
778 // Mac SharedMemory does not support named shared memory. crbug.com/345734
779 #if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) && \
780 !defined(OS_FUCHSIA)
781 // On POSIX it is especially important we test shmem across processes,
782 // not just across threads. But the test is enabled on all platforms.
// On POSIX it is especially important we test shmem across processes,
// not just across threads. But the test is enabled on all platforms.
class SharedMemoryProcessTest : public MultiProcessTest {
 public:
  // Removes the named segment shared across the spawned processes.
  static void CleanUp() {
    SharedMemory memory;
    memory.Delete(s_test_name_);
  }

  // Body run inside each child process: opens (or creates) the shared
  // segment and atomically increments the int at its start. Returns the
  // number of failures, which becomes the child's exit code.
  static int TaskTestMain() {
    int errors = 0;
    SharedMemory memory;
    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
    EXPECT_TRUE(rv);
    if (rv != true)
      errors++;
    rv = memory.Map(s_data_size_);
    EXPECT_TRUE(rv);
    if (rv != true)
      errors++;
    int* ptr = static_cast<int*>(memory.memory());

    // This runs concurrently in multiple processes. Writes need to be atomic.
    subtle::Barrier_AtomicIncrement(ptr, 1);
    memory.Close();
    return errors;
  }

  static const char s_test_name_[];    // Name of the shared segment.
  static const uint32_t s_data_size_;  // Size of the shared segment in bytes.
};

const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
815
TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
  const int kNumTasks = 5;

  // Clear any leftover segment from a previous (possibly crashed) run.
  SharedMemoryProcessTest::CleanUp();

  // Create a shared memory region. Set the first word to 0.
  SharedMemory memory;
  bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
  ASSERT_TRUE(rv);
  rv = memory.Map(s_data_size_);
  ASSERT_TRUE(rv);
  int* ptr = static_cast<int*>(memory.memory());
  *ptr = 0;

  // Start |kNumTasks| processes, each of which atomically increments the first
  // word by 1.
  Process processes[kNumTasks];
  for (int index = 0; index < kNumTasks; ++index) {
    processes[index] = SpawnChild("SharedMemoryTestMain");
    ASSERT_TRUE(processes[index].IsValid());
  }

  // Check that each process exited correctly.
  int exit_code = 0;
  for (int index = 0; index < kNumTasks; ++index) {
    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
    EXPECT_EQ(0, exit_code);
  }

  // Check that the shared memory region reflects |kNumTasks| increments.
  ASSERT_EQ(kNumTasks, *ptr);

  memory.Close();
  SharedMemoryProcessTest::CleanUp();
}
851
// Entry point executed in each child spawned by SharedMemoryAcrossProcesses;
// the returned error count becomes the child's exit code.
MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
  return SharedMemoryProcessTest::TaskTestMain();
}
855 #endif // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) &&
856 // !defined(OS_FUCHSIA)
857
TEST_P(SharedMemoryTest, MappedId) {
  const uint32_t kDataSize = 1024;
  SharedMemory memory;
  SharedMemoryCreateOptions options;
  options.size = kDataSize;
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
  options.type = SharedMemoryHandle::POSIX;
#endif

  EXPECT_TRUE(memory.Create(options));
  const base::UnguessableToken id = memory.handle().GetGUID();
  EXPECT_FALSE(id.is_empty());
  // Before Map(), no mapped id is exposed.
  EXPECT_TRUE(memory.mapped_id().is_empty());

  // Map() publishes the handle's GUID as the mapped id.
  EXPECT_TRUE(memory.Map(kDataSize));
  EXPECT_EQ(id, memory.mapped_id());

  // Closing the handle leaves the mapping (and its id) intact...
  memory.Close();
  EXPECT_EQ(id, memory.mapped_id());

  // ...while unmapping clears the id.
  memory.Unmap();
  EXPECT_TRUE(memory.mapped_id().is_empty());
}
882
// Run the whole parameterized suite in the default configuration...
INSTANTIATE_TEST_CASE_P(Default,
                        SharedMemoryTest,
                        ::testing::Values(Mode::Default));
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
// ...and, on desktop Linux, again with /dev/shm usage disabled.
INSTANTIATE_TEST_CASE_P(SkipDevShm,
                        SharedMemoryTest,
                        ::testing::Values(Mode::DisableDevShm));
#endif  // defined(OS_LINUX) && !defined(OS_CHROMEOS)
891
892 #if defined(OS_ANDROID)
TEST(SharedMemoryTest, ReadOnlyRegions) {
  const uint32_t kDataSize = 1024;
  SharedMemory memory;
  SharedMemoryCreateOptions options;
  options.size = kDataSize;
  EXPECT_TRUE(memory.Create(options));

  EXPECT_FALSE(memory.handle().IsRegionReadOnly());

  // Check that it is possible to map the region directly from the fd.
  int region_fd = memory.handle().GetHandle();
  EXPECT_GE(region_fd, 0);
  void* address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
                       region_fd, 0);
  ASSERT_TRUE(address);
  ASSERT_NE(address, MAP_FAILED);
  // The |success| flag and conditional munmap() of the original were
  // redundant: the ASSERTs above return on failure, so reaching this point
  // implies the mapping succeeded.
  EXPECT_EQ(0, munmap(address, kDataSize));

  ASSERT_TRUE(memory.handle().SetRegionReadOnly());
  EXPECT_TRUE(memory.handle().IsRegionReadOnly());

  // Check that it is no longer possible to map the region read/write; ashmem
  // rejects the attempt with EPERM. (The original's post-ASSERT_FALSE cleanup
  // branch was unreachable — ASSERT returns on failure — so it is removed.)
  errno = 0;
  address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
                 region_fd, 0);
  ASSERT_EQ(MAP_FAILED, address);
  ASSERT_EQ(EPERM, errno);
}
928
TEST(SharedMemoryTest, ReadOnlyDescriptors) {
  const uint32_t kDataSize = 1024;
  SharedMemory memory;
  SharedMemoryCreateOptions options;
  options.size = kDataSize;
  EXPECT_TRUE(memory.Create(options));

  EXPECT_FALSE(memory.handle().IsRegionReadOnly());

  // Getting a read-only descriptor should not make the region read-only itself.
  SharedMemoryHandle ro_handle = memory.GetReadOnlyHandle();
  EXPECT_FALSE(memory.handle().IsRegionReadOnly());

  // Mapping a writable region from a read-only descriptor should not
  // be possible, it will DCHECK() in debug builds (see test below),
  // while returning false on release ones.
  {
    // Intercept the DCHECK so a debug build records the failure instead of
    // crashing the test binary.
    bool dcheck_fired = false;
    logging::ScopedLogAssertHandler log_assert(
        base::BindRepeating([](bool* flag, const char*, int, base::StringPiece,
                               base::StringPiece) { *flag = true; },
                            base::Unretained(&dcheck_fired)));

    SharedMemory rw_region(ro_handle.Duplicate(), /* read_only */ false);
    EXPECT_FALSE(rw_region.Map(kDataSize));
    EXPECT_EQ(DCHECK_IS_ON() ? true : false, dcheck_fired);
  }

  // Nor shall it turn the region read-only itself.
  EXPECT_FALSE(ro_handle.IsRegionReadOnly());

  // Mapping a read-only region from a read-only descriptor should work.
  SharedMemory ro_region(ro_handle.Duplicate(), /* read_only */ true);
  EXPECT_TRUE(ro_region.Map(kDataSize));

  // And it should turn the region read-only too.
  EXPECT_TRUE(ro_handle.IsRegionReadOnly());
  EXPECT_TRUE(memory.handle().IsRegionReadOnly());
  EXPECT_FALSE(memory.Map(kDataSize));

  ro_handle.Close();
}
971
972 #endif // OS_ANDROID
973
974 } // namespace base
975