/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <fcntl.h>
#include <linux/ashmem.h>
#include <linux/fs.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <vector>

#include <android-base/macros.h>
#include <android-base/unique_fd.h>
#include <cutils/ashmem.h>
#include <gtest/gtest.h>

#include "ashmem-internal.h"

using android::base::unique_fd;

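// Creates an anonymous ashmem region of |size| bytes, sets its protection mask
// to |prot|, and verifies that the resulting descriptor is valid, reports the
// requested size, and is CLOEXEC.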
static void TestCreateRegion(size_t size, unique_fd& fd, int prot) {
    fd = unique_fd(ashmem_create_region(nullptr, size));
    ASSERT_TRUE(fd >= 0);
    ASSERT_TRUE(ashmem_valid(fd));
    ASSERT_EQ(size, static_cast<size_t>(ashmem_get_size_region(fd)));
    ASSERT_EQ(0, ashmem_set_prot_region(fd, prot));

    // We've been inconsistent historically about whether or not these file
    // descriptors were CLOEXEC. Make sure we're consistent going forward.
    // https://issuetracker.google.com/165667331
    ASSERT_EQ(FD_CLOEXEC, (fcntl(fd, F_GETFD) & FD_CLOEXEC));
}

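// Maps |size| bytes of |fd| at offset |off| with protection |prot| and returns
// the mapping through |region|; fails the test if mmap() fails.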
static void TestMmap(const unique_fd& fd, size_t size, int prot, void** region, off_t off = 0) {
    ASSERT_TRUE(fd >= 0);
    ASSERT_TRUE(ashmem_valid(fd));
    *region = mmap(nullptr, size, prot, MAP_SHARED, fd, off);
    ASSERT_NE(MAP_FAILED, *region);
}

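// Verifies that mapping |fd| with protection |prot| is rejected.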
static void TestProtDenied(const unique_fd& fd, size_t size, int prot) {
    ASSERT_TRUE(fd >= 0);
    ASSERT_TRUE(ashmem_valid(fd));
    EXPECT_EQ(MAP_FAILED, mmap(nullptr, size, prot, MAP_SHARED, fd, 0));
}

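// Verifies that the region's current protection mask is exactly |prot|.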
static void TestProtIs(const unique_fd& fd, int prot) {
    ASSERT_TRUE(fd >= 0);
    ASSERT_TRUE(ashmem_valid(fd));
    EXPECT_EQ(prot, ioctl(fd, ASHMEM_GET_PROT_MASK));
}

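// Fills |data| with a repeating 0x00..0xFF byte pattern.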
static void FillData(std::vector<uint8_t>& data) {
    for (size_t i = 0; i < data.size(); i++) {
        data[i] = i & 0xFF;
    }
}

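// Reaps the child |pid| and asserts that it terminated normally with exit
// status 0.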
static void waitForChildProcessExit(pid_t pid) {
    int exitStatus;
    pid_t childPid = waitpid(pid, &exitStatus, 0);

    ASSERT_GT(childPid, 0);
    ASSERT_TRUE(WIFEXITED(exitStatus));
    ASSERT_EQ(0, WEXITSTATUS(exitStatus));
}

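// Writes a known pattern into the region, forks a child that remaps the
// region, verifies the pattern, and zeroes it, then remaps the region in the
// parent to confirm the child's writes are visible.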
static void ForkTest(const unique_fd& fd, size_t size) {
    void* region1 = nullptr;
    std::vector<uint8_t> data(size);
    FillData(data);

    ASSERT_NO_FATAL_FAILURE(TestMmap(fd, size, PROT_READ | PROT_WRITE, &region1));

    memcpy(region1, data.data(), size);
    ASSERT_EQ(0, memcmp(region1, data.data(), size));
    EXPECT_EQ(0, munmap(region1, size));

    pid_t pid = fork();
    if (!pid) {
        if (!ashmem_valid(fd)) {
            _exit(3);
        }

        void* region2 = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (region2 == MAP_FAILED) {
            _exit(1);
        } else if (memcmp(region2, data.data(), size) != 0) {
            _exit(2);
        }

        // Clear the ashmem buffer here to ensure that updates to the contents
        // of the buffer are visible across processes with a reference to the
        // buffer.
        memset(region2, 0, size);
        munmap(region2, size);
        _exit(0);
    } else {
        ASSERT_GT(pid, 0);
        ASSERT_NO_FATAL_FAILURE(waitForChildProcessExit(pid));
    }

    memset(data.data(), 0, size);
    void* region2;
    ASSERT_NO_FATAL_FAILURE(TestMmap(fd, size, PROT_READ | PROT_WRITE, &region2));
    ASSERT_EQ(0, memcmp(region2, data.data(), size));
    EXPECT_EQ(0, munmap(region2, size));
}

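// Exercises lseek() (including SEEK_DATA/SEEK_HOLE) and read() on a region
// whose two middle pages hold data and whose first and last pages are holes.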
static void FileOperationsTest(const unique_fd& fd, size_t size) {
    void* region = nullptr;

    const size_t pageSize = getpagesize();
    const size_t dataSize = pageSize * 2;
    const size_t holeSize = pageSize;
    ASSERT_NO_FATAL_FAILURE(TestMmap(fd, dataSize, PROT_READ | PROT_WRITE, &region, holeSize));

    std::vector<uint8_t> data(dataSize);
    FillData(data);
    memcpy(region, data.data(), dataSize);

    const off_t dataStart = holeSize;
    const off_t dataEnd = dataStart + dataSize;

    // The sequence of seeks below looks something like this:
    //
    // [ ][data][data][ ]
    // --^           lseek(99, SEEK_SET)
    //   ------^     lseek(dataStart, SEEK_CUR)
    //   ------^     lseek(0, SEEK_DATA)
    //   ------------^  lseek(dataStart, SEEK_HOLE)
    //              ^-- lseek(-99, SEEK_END)
    //        ^------   lseek(-dataStart, SEEK_CUR)
    const struct {
        // lseek() parameters
        off_t offset;
        int whence;
        // Expected lseek() return value
        off_t ret;
    } seeks[] = {
        {99, SEEK_SET, 99},
        {dataStart, SEEK_CUR, dataStart + 99},
        {0, SEEK_DATA, dataStart},
        {dataStart, SEEK_HOLE, dataEnd},
        {-99, SEEK_END, static_cast<off_t>(size) - 99},
        {-dataStart, SEEK_CUR, dataEnd - 99},
    };
    for (const auto& cfg : seeks) {
        errno = 0;
        ASSERT_TRUE(ashmem_valid(fd));
        auto off = lseek(fd, cfg.offset, cfg.whence);
        ASSERT_EQ(cfg.ret, off) << "lseek(" << cfg.offset << ", " << cfg.whence << ") failed"
                                << (errno ? ": " : "") << (errno ? strerror(errno) : "");

        if (off >= dataStart && off < dataEnd) {
            off_t dataOff = off - dataStart;
            ssize_t readSize = dataSize - dataOff;
            uint8_t buf[readSize];

            ASSERT_EQ(readSize, TEMP_FAILURE_RETRY(read(fd, buf, readSize)));
            EXPECT_EQ(0, memcmp(buf, &data[dataOff], readSize));
        }
    }

    EXPECT_EQ(0, munmap(region, dataSize));
}

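// For a read/execute-only region: writable mappings must be denied and
// read-only mappings must still succeed.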
static void ProtTestROBuffer(const unique_fd& fd, size_t size) {
    void* region;

    TestProtDenied(fd, size, PROT_WRITE);
    TestProtIs(fd, PROT_READ | PROT_EXEC);
    ASSERT_NO_FATAL_FAILURE(TestMmap(fd, size, PROT_READ, &region));
    EXPECT_EQ(0, munmap(region, size));
}

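// For a fully writable region: protection bits can be dropped but never added
// back, and writable mappings are denied once PROT_WRITE is removed.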
static void ProtTestRWBuffer(const unique_fd& fd, size_t size) {
    TestProtIs(fd, PROT_READ | PROT_WRITE | PROT_EXEC);
    ASSERT_EQ(0, ashmem_set_prot_region(fd, PROT_READ | PROT_EXEC));
    errno = 0;
    ASSERT_EQ(-1, ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE | PROT_EXEC))
            << "kernel shouldn't allow adding protection bits";
    EXPECT_EQ(EINVAL, errno);
    TestProtIs(fd, PROT_READ | PROT_EXEC);
    TestProtDenied(fd, size, PROT_WRITE);
}

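// Drops PROT_WRITE from the region in a forked child and checks that the
// parent can no longer create writable mappings of that region.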
static void ForkProtTest(const unique_fd& fd, size_t size) {
    pid_t pid = fork();
    if (!pid) {
        // Change buffer mapping permissions to read-only to ensure that
        // updates to the buffer's mapping permissions are visible across
        // processes that reference the buffer.
        if (!ashmem_valid(fd)) {
            _exit(3);
        } else if (ashmem_set_prot_region(fd, PROT_READ) == -1) {
            _exit(1);
        }
        _exit(0);
    } else {
        ASSERT_GT(pid, 0);
        ASSERT_NO_FATAL_FAILURE(waitForChildProcessExit(pid));
    }

    ASSERT_NO_FATAL_FAILURE(TestProtDenied(fd, size, PROT_WRITE));
}

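// Same cross-process visibility check as ForkTest, but over |nRegions|
// regions shared with a single child.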
static void ForkMultiRegionTest(unique_fd fds[], int nRegions, size_t size) {
    std::vector<uint8_t> data(size);
    FillData(data);

    for (int i = 0; i < nRegions; i++) {
        void* region = nullptr;
        ASSERT_NO_FATAL_FAILURE(TestMmap(fds[i], size, PROT_READ | PROT_WRITE, &region));
        memcpy(region, data.data(), size);
        ASSERT_EQ(0, memcmp(region, data.data(), size));
        EXPECT_EQ(0, munmap(region, size));
    }

    pid_t pid = fork();
    if (!pid) {
        // Clear each ashmem buffer in the context of the child process to
        // ensure that the updates are visible to the parent process later.
        for (int i = 0; i < nRegions; i++) {
            if (!ashmem_valid(fds[i])) {
                _exit(3);
            }
            void* region = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fds[i], 0);
            if (region == MAP_FAILED) {
                _exit(1);
            }
            if (memcmp(region, data.data(), size) != 0) {
                munmap(region, size);
                _exit(2);
            }
            memset(region, 0, size);
            munmap(region, size);
        }
        _exit(0);
    } else {
        ASSERT_GT(pid, 0);
        ASSERT_NO_FATAL_FAILURE(waitForChildProcessExit(pid));
    }

    memset(data.data(), 0, size);
    for (int i = 0; i < nRegions; i++) {
        void* region;
        ASSERT_NO_FATAL_FAILURE(TestMmap(fds[i], size, PROT_READ | PROT_WRITE, &region));
        ASSERT_EQ(0, memcmp(region, data.data(), size));
        EXPECT_EQ(0, munmap(region, size));
    }
}

TEST(AshmemTest, ForkTest) {
    const size_t size = getpagesize();
    unique_fd fd;

    ASSERT_NO_FATAL_FAILURE(TestCreateRegion(size, fd, PROT_READ | PROT_WRITE));
    ASSERT_NO_FATAL_FAILURE(ForkTest(fd, size));
}

TEST(AshmemTest, FileOperationsTest) {
    const size_t pageSize = getpagesize();
    // Allocate a 4-page buffer, but leave page-sized holes on either side in
    // the test.
    const size_t size = pageSize * 4;
    unique_fd fd;

    ASSERT_NO_FATAL_FAILURE(TestCreateRegion(size, fd, PROT_READ | PROT_WRITE));
    ASSERT_NO_FATAL_FAILURE(FileOperationsTest(fd, size));
}

TEST(AshmemTest, ProtTest) {
    unique_fd fd;
    const size_t size = getpagesize();

    ASSERT_NO_FATAL_FAILURE(TestCreateRegion(size, fd, PROT_READ | PROT_EXEC));
    ASSERT_NO_FATAL_FAILURE(ProtTestROBuffer(fd, size));

    ASSERT_NO_FATAL_FAILURE(TestCreateRegion(size, fd, PROT_READ | PROT_WRITE | PROT_EXEC));
    ASSERT_NO_FATAL_FAILURE(ProtTestRWBuffer(fd, size));
}

TEST(AshmemTest, ForkProtTest) {
    unique_fd fd;
    const size_t size = getpagesize();

    ASSERT_NO_FATAL_FAILURE(TestCreateRegion(size, fd, PROT_READ | PROT_WRITE));
    ASSERT_NO_FATAL_FAILURE(ForkProtTest(fd, size));
}

TEST(AshmemTest, ForkMultiRegionTest) {
    const size_t size = getpagesize();
    constexpr int nRegions = 16;
    unique_fd fds[nRegions];

    for (int i = 0; i < nRegions; i++) {
        ASSERT_NO_FATAL_FAILURE(TestCreateRegion(size, fds[i], PROT_READ | PROT_WRITE));
    }

    ASSERT_NO_FATAL_FAILURE(ForkMultiRegionTest(fds, nRegions, size));
}

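// Compatibility tests for the legacy ashmem ioctl interface when
// ashmem_create_region() is backed by memfd; skipped if the kernel lacks
// memfd support.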
class AshmemTestMemfdAshmemCompat : public ::testing::Test {
  protected:
    void SetUp() override {
        if (!has_memfd_support()) {
            GTEST_SKIP() << "No memfd support; skipping memfd-ashmem compat tests";
        }
    }
};

TEST_F(AshmemTestMemfdAshmemCompat, SetNameTest) {
    unique_fd fd;

    // ioctl() should fail, since memfd names cannot be changed after the buffer has been created.
    ASSERT_NO_FATAL_FAILURE(
            TestCreateRegion(getpagesize(), fd, PROT_READ | PROT_WRITE | PROT_EXEC));
    ASSERT_LT(ioctl(fd, ASHMEM_SET_NAME, "invalid-command"), 0);
}

TEST_F(AshmemTestMemfdAshmemCompat, GetNameTest) {
    unique_fd fd;
    ASSERT_NO_FATAL_FAILURE(
            TestCreateRegion(getpagesize(), fd, PROT_READ | PROT_WRITE | PROT_EXEC));

    char testBuf[ASHMEM_NAME_LEN];
    ASSERT_EQ(0, ioctl(fd, ASHMEM_GET_NAME, &testBuf));
    // ashmem_create_region(nullptr, ...) creates memfds with the name "none".
    ASSERT_STREQ(testBuf, "none");
}

TEST_F(AshmemTestMemfdAshmemCompat, SetSizeTest) {
    unique_fd fd;

    // ioctl() should fail, since libcutils sets and seals the buffer size after creating it.
    ASSERT_NO_FATAL_FAILURE(
            TestCreateRegion(getpagesize(), fd, PROT_READ | PROT_WRITE | PROT_EXEC));
    ASSERT_LT(ioctl(fd, ASHMEM_SET_SIZE, 2 * getpagesize()), 0);
}

TEST_F(AshmemTestMemfdAshmemCompat, GetSizeTest) {
    unique_fd fd;
    size_t bufSize = getpagesize();

    ASSERT_NO_FATAL_FAILURE(TestCreateRegion(bufSize, fd, PROT_READ | PROT_WRITE | PROT_EXEC));
    ASSERT_EQ(static_cast<int>(bufSize), ioctl(fd, ASHMEM_GET_SIZE, 0));
}

TEST_F(AshmemTestMemfdAshmemCompat, ProtMaskTest) {
    unique_fd fd;
    ASSERT_NO_FATAL_FAILURE(
            TestCreateRegion(getpagesize(), fd, PROT_READ | PROT_WRITE | PROT_EXEC));

    // We can only change PROT_WRITE for memfds since memfd implements ashmem's prot_mask through
    // file seals, and only write seals exist.
    //
    // All memfd files start off as being writable (i.e. PROT_WRITE is part of the prot_mask).
    // Test to ensure that the implementation only clears the PROT_WRITE bit when requested.
    ASSERT_EQ(0, ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ | PROT_WRITE | PROT_EXEC));
    int prot = ioctl(fd, ASHMEM_GET_PROT_MASK, 0);
    ASSERT_NE(prot, -1);
    ASSERT_TRUE(prot & PROT_WRITE) << prot;

    ASSERT_EQ(0, ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ | PROT_EXEC));
    prot = ioctl(fd, ASHMEM_GET_PROT_MASK, 0);
    ASSERT_NE(prot, -1);
    ASSERT_TRUE(!(prot & PROT_WRITE)) << prot;

    // The shim layer should implement clearing PROT_WRITE via file seals, so check the file
    // seals to ensure that F_SEAL_FUTURE_WRITE is set.
    int seals = fcntl(fd, F_GET_SEALS, 0);
    ASSERT_NE(seals, -1);
    ASSERT_TRUE(seals & F_SEAL_FUTURE_WRITE) << seals;

    // Similarly, ensure that file seals affect prot_mask.
    unique_fd fd2;
    ASSERT_NO_FATAL_FAILURE(
            TestCreateRegion(getpagesize(), fd2, PROT_READ | PROT_WRITE | PROT_EXEC));
    ASSERT_EQ(0, fcntl(fd2, F_ADD_SEALS, F_SEAL_FUTURE_WRITE));
    prot = ioctl(fd2, ASHMEM_GET_PROT_MASK, 0);
    ASSERT_NE(prot, -1);
    ASSERT_TRUE(!(prot & PROT_WRITE)) << prot;

    // And finally, ensure that adding back permissions fails.
    ASSERT_LT(ioctl(fd2, ASHMEM_SET_PROT_MASK, PROT_READ | PROT_WRITE | PROT_EXEC), 0);
}

TEST_F(AshmemTestMemfdAshmemCompat, FileIDTest) {
    unique_fd fd;
    ASSERT_NO_FATAL_FAILURE(
            TestCreateRegion(getpagesize(), fd, PROT_READ | PROT_WRITE | PROT_EXEC));

    unsigned long ino;
    ASSERT_EQ(0, ioctl(fd, ASHMEM_GET_FILE_ID, &ino));
    struct stat st;
    ASSERT_EQ(0, fstat(fd, &st));
    ASSERT_EQ(ino, st.st_ino);
}

TEST_F(AshmemTestMemfdAshmemCompat, UnpinningTest) {
    unique_fd fd;
    size_t bufSize = getpagesize();
    ASSERT_NO_FATAL_FAILURE(
            TestCreateRegion(getpagesize(), fd, PROT_READ | PROT_WRITE | PROT_EXEC));

    struct ashmem_pin pin = {
        .offset = 0,
        .len = static_cast<uint32_t>(bufSize),
    };
    ASSERT_EQ(0, ioctl(fd, ASHMEM_UNPIN, &pin));
    // ASHMEM_UNPIN should just be a no-op.
    ASSERT_EQ(ASHMEM_IS_PINNED, ioctl(fd, ASHMEM_GET_PIN_STATUS, 0));

    // These shouldn't do anything either; since the region was never actually
    // unpinned, it can't have been purged.
    ASSERT_EQ(0, ioctl(fd, ASHMEM_PURGE_ALL_CACHES, 0));
    ASSERT_EQ(ASHMEM_NOT_PURGED, ioctl(fd, ASHMEM_PIN, &pin));
}