/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <gtest/gtest.h>

#include <ion/ion.h>

#include "ion_test_fixture.h"

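// Fixture providing the ION device fd (m_ionFd) and the list of available
// heaps (m_allHeaps) that every mapping test below iterates over.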
class Map : public IonAllHeapsTest {
};

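// Map an allocation through ion_map() and verify the mapping remains writable
// after both the map fd and the ION handle have been released.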
TEST_F(Map, MapHandle)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            ion_user_handle_t handle = 0;

            ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
            ASSERT_TRUE(handle != 0);

            int map_fd = -1;
            unsigned char *ptr = NULL;
            ASSERT_EQ(0, ion_map(m_ionFd, handle, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0, &ptr, &map_fd));
            ASSERT_TRUE(ptr != NULL);
            ASSERT_GE(map_fd, 0);

            ASSERT_EQ(0, close(map_fd));

            ASSERT_EQ(0, ion_free(m_ionFd, handle));

            memset(ptr, 0xaa, size);

            ASSERT_EQ(0, munmap(ptr, size));
        }
    }
}

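// Allocate a buffer fd with ion_alloc_fd(), map it directly with mmap(), and
// verify the mapping stays writable after the fd has been closed.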
TEST_F(Map, MapFd)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            int map_fd = -1;

            ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &map_fd));
            ASSERT_GE(map_fd, 0);

            void *ptr;
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
            ASSERT_TRUE(ptr != MAP_FAILED);

            ASSERT_EQ(0, close(map_fd));

            memset(ptr, 0xaa, size);

            ASSERT_EQ(0, munmap(ptr, size));
        }
    }
}

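// Verify that mmap() honors a non-zero offset: fill the second page of a
// two-page allocation with 0xaa, then map only that page and check its
// contents.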
TEST_F(Map, MapOffset)
{
    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, PAGE_SIZE * 2, 0, heapMask, 0, &map_fd));
        ASSERT_GE(map_fd, 0);

        unsigned char *ptr;
        ptr = (unsigned char *)mmap(NULL, PAGE_SIZE * 2, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        memset(ptr, 0, PAGE_SIZE);
        memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);

        ASSERT_EQ(0, munmap(ptr, PAGE_SIZE * 2));

        ptr = (unsigned char *)mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, PAGE_SIZE);
        ASSERT_TRUE(ptr != MAP_FAILED);

        ASSERT_EQ(ptr[0], 0xaa);
        ASSERT_EQ(ptr[PAGE_SIZE - 1], 0xaa);

        ASSERT_EQ(0, munmap(ptr, PAGE_SIZE));

        ASSERT_EQ(0, close(map_fd));
    }
}

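// Same as MapFd, but the buffer is allocated with ION_FLAG_CACHED.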
TEST_F(Map, MapCached)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            int map_fd = -1;
            unsigned int flags = ION_FLAG_CACHED;

            ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, flags, &map_fd));
            ASSERT_GE(map_fd, 0);

            void *ptr;
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
            ASSERT_TRUE(ptr != MAP_FAILED);

            ASSERT_EQ(0, close(map_fd));

            memset(ptr, 0xaa, size);

            ASSERT_EQ(0, munmap(ptr, size));
        }
    }
}

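// Same as MapCached, but additionally sets ION_FLAG_CACHED_NEEDS_SYNC on the
// allocation.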
TEST_F(Map, MapCachedNeedsSync)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            int map_fd = -1;
            unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

            ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, flags, &map_fd));
            ASSERT_GE(map_fd, 0);

            void *ptr;
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
            ASSERT_TRUE(ptr != MAP_FAILED);

            ASSERT_EQ(0, close(map_fd));

            memset(ptr, 0xaa, size);

            ASSERT_EQ(0, munmap(ptr, size));
        }
    }
}