// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/test/test_shared_memory_util.h"

#include <gtest/gtest.h>

#include <stddef.h>
#include <stdint.h>

#include "base/logging.h"
#include "build/build_config.h"

#if defined(OS_POSIX) && !defined(OS_NACL)
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#endif

#if defined(OS_FUCHSIA)
#include <zircon/process.h>
#include <zircon/rights.h>
#include <zircon/syscalls.h>
#endif

#if defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach_vm.h>
#endif

#if defined(OS_WIN)
#include <aclapi.h>
#endif

namespace base {

#if !defined(OS_NACL)

static const size_t kDataSize = 1024;

// Common routine used with POSIX file descriptors. Checks that the shared
// memory file descriptor |fd| does not allow writable mappings. Returns true
// on success, false otherwise.
#if defined(OS_POSIX)
static bool CheckReadOnlySharedMemoryFdPosix(int fd) {
// Note that the error on Android is EPERM, unlike other platforms where
// it will be EACCES.
#if defined(OS_ANDROID)
  const int kExpectedErrno = EPERM;
#else
  const int kExpectedErrno = EACCES;
#endif
  errno = 0;
  void* address =
      mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  const bool success = (address != nullptr) && (address != MAP_FAILED);
  if (success) {
    LOG(ERROR) << "mmap() should have failed!";
    munmap(address, kDataSize);  // Cleanup.
    return false;
  }
  if (errno != kExpectedErrno) {
    LOG(ERROR) << "Expected mmap() to return " << kExpectedErrno
               << " but returned " << errno << ": " << strerror(errno) << "\n";
    return false;
  }
  return true;
}
#endif  // OS_POSIX

#if defined(OS_FUCHSIA)
// Fuchsia specific implementation.
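// Attempts a writable zx_vmar_map() of |handle| and expects the kernel to
// reject it with ZX_ERR_ACCESS_DENIED.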
bool CheckReadOnlySharedMemoryFuchsiaHandle(zx_handle_t handle) {
  const uint32_t flags = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
  uintptr_t addr;
  const zx_handle_t root = zx_vmar_root_self();
  const zx_status_t status =
      zx_vmar_map(root, 0, handle, 0U, kDataSize, flags, &addr);
  if (status == ZX_OK) {
    LOG(ERROR) << "zx_vmar_map() should have failed!";
    zx_vmar_unmap(root, addr, kDataSize);
    return false;
  }
  if (status != ZX_ERR_ACCESS_DENIED) {
    LOG(ERROR) << "Expected zx_vmar_map() to return " << ZX_ERR_ACCESS_DENIED
               << " (ZX_ERR_ACCESS_DENIED) but returned " << status << "\n";
    return false;
  }
  return true;
}

#elif defined(OS_MACOSX) && !defined(OS_IOS)
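// Mach specific implementation. Attempts a writable mach_vm_map() of
// |memory_object| and expects the call to fail.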
bool CheckReadOnlySharedMemoryMachPort(mach_port_t memory_object) {
  mach_vm_address_t memory;
  const kern_return_t kr = mach_vm_map(
      mach_task_self(), &memory, kDataSize, 0, VM_FLAGS_ANYWHERE, memory_object,
      0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
      VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK, VM_INHERIT_NONE);
  if (kr == KERN_SUCCESS) {
    LOG(ERROR) << "mach_vm_map() should have failed!";
    mach_vm_deallocate(mach_task_self(), memory, kDataSize);  // Cleanup.
    return false;
  }
  return true;
}

#elif defined(OS_WIN)
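// Windows specific implementation. Attempts to MapViewOfFile() |handle| with
// write access and expects the mapping to fail.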
bool CheckReadOnlySharedMemoryWindowsHandle(HANDLE handle) {
  void* memory =
      MapViewOfFile(handle, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, kDataSize);
  if (memory != nullptr) {
    LOG(ERROR) << "MapViewOfFile() should have failed!";
    UnmapViewOfFile(memory);
    return false;
  }
  return true;
}
#endif

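// Checks that |handle| cannot be mapped writable, dispatching to the
// platform-specific helper above. Intended to be asserted from tests, e.g.:
//   EXPECT_TRUE(CheckReadOnlySharedMemoryHandleForTesting(handle));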
bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // For OSX, the code has to deal with both POSIX and MACH handles.
  if (handle.type_ == SharedMemoryHandle::POSIX)
    return CheckReadOnlySharedMemoryFdPosix(handle.file_descriptor_.fd);
  else
    return CheckReadOnlySharedMemoryMachPort(handle.memory_object_);
#elif defined(OS_FUCHSIA)
  return CheckReadOnlySharedMemoryFuchsiaHandle(handle.GetHandle());
#elif defined(OS_WIN)
  return CheckReadOnlySharedMemoryWindowsHandle(handle.GetHandle());
#else
  return CheckReadOnlySharedMemoryFdPosix(handle.GetHandle());
#endif
}

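// Checks that |region| was created in read-only mode and that its platform
// handle cannot be mapped writable.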
bool CheckReadOnlyPlatformSharedMemoryRegionForTesting(
    subtle::PlatformSharedMemoryRegion region) {
  if (region.GetMode() != subtle::PlatformSharedMemoryRegion::Mode::kReadOnly) {
    LOG(ERROR) << "Expected region mode is "
               << static_cast<int>(
                      subtle::PlatformSharedMemoryRegion::Mode::kReadOnly)
               << " but actual is " << static_cast<int>(region.GetMode());
    return false;
  }

#if defined(OS_MACOSX) && !defined(OS_IOS)
  return CheckReadOnlySharedMemoryMachPort(region.GetPlatformHandle());
#elif defined(OS_FUCHSIA)
  return CheckReadOnlySharedMemoryFuchsiaHandle(region.GetPlatformHandle());
#elif defined(OS_WIN)
  return CheckReadOnlySharedMemoryWindowsHandle(region.GetPlatformHandle());
#elif defined(OS_ANDROID)
  return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle());
#else
  return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle().fd);
#endif
}

#endif  // !OS_NACL

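// Maps the entirety of |region| writable, for use in tests.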
WritableSharedMemoryMapping MapForTesting(
    subtle::PlatformSharedMemoryRegion* region) {
  return MapAtForTesting(region, 0, region->GetSize());
}

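// Maps |size| bytes of |region| starting at |offset| and wraps the result in a
// WritableSharedMemoryMapping. Returns a default-constructed mapping if
// MapAt() fails.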
WritableSharedMemoryMapping MapAtForTesting(
    subtle::PlatformSharedMemoryRegion* region,
    off_t offset,
    size_t size) {
  void* memory = nullptr;
  size_t mapped_size = 0;
  if (!region->MapAt(offset, size, &memory, &mapped_size))
    return {};

  return WritableSharedMemoryMapping(memory, size, mapped_size,
                                     region->GetGUID());
}

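// Specialization of CreateMappedRegion() for ReadOnlySharedMemoryRegion:
// ReadOnlySharedMemoryRegion::Create() already returns both the region and its
// writable mapping, so the pair is assembled from MappedReadOnlyRegion.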
template <>
std::pair<ReadOnlySharedMemoryRegion, WritableSharedMemoryMapping>
CreateMappedRegion(size_t size) {
  MappedReadOnlyRegion mapped_region = ReadOnlySharedMemoryRegion::Create(size);
  return {std::move(mapped_region.region), std::move(mapped_region.mapping)};
}

}  // namespace base