1 // Copyright 2016 The SwiftShader Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "ExecutableMemory.hpp"
16
17 #include "Debug.hpp"
18
19 #if defined(_WIN32)
20 #ifndef WIN32_LEAN_AND_MEAN
21 #define WIN32_LEAN_AND_MEAN
22 #endif
23 #include <windows.h>
24 #include <intrin.h>
25 #elif defined(__Fuchsia__)
26 #include <unistd.h>
27 #include <zircon/process.h>
28 #include <zircon/syscalls.h>
29 #else
30 #include <errno.h>
31 #include <sys/mman.h>
32 #include <stdlib.h>
33 #include <unistd.h>
34 #endif
35
36 #include <memory.h>
37
38 #undef allocate
39 #undef deallocate
40
41 #if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined (_M_X64)) && !defined(__x86__)
42 #define __x86__
43 #endif
44
45 namespace rr
46 {
47 namespace
48 {
// Header stored immediately below each aligned pointer handed out by
// allocateRaw() (non-mmap path), so deallocate() can recover the
// original unaligned block to delete[].
struct Allocation
{
	// size_t bytes;       // Allocation size; currently unused, kept for reference.
	unsigned char *block;  // Unaligned base pointer originally returned by new[].
};
54
// Allocates |bytes| bytes of uninitialized memory aligned to |alignment|,
// which must be a power of two. Returns nullptr on failure.
void *allocateRaw(size_t bytes, size_t alignment)
{
	ASSERT((alignment & (alignment - 1)) == 0);   // Power of 2 alignment.

#if defined(LINUX_ENABLE_NAMED_MMAP)
	// posix_memalign() reports failure via its return value rather than
	// errno, so propagate the error code to errno for callers that check it.
	void *allocation;
	int result = posix_memalign(&allocation, alignment, bytes);
	if(result != 0)
	{
		errno = result;
		allocation = nullptr;
	}
	return allocation;
#else
	// Over-allocate so an aligned pointer can be carved out of the block,
	// leaving room for an Allocation header immediately before it.
	unsigned char *block = new unsigned char[bytes + sizeof(Allocation) + alignment];
	unsigned char *aligned = nullptr;

	if(block)
	{
		// Round up past the header to the next |alignment| boundary.
		// -(intptr_t)alignment is the two's-complement mask ~(alignment - 1).
		aligned = (unsigned char*)((uintptr_t)(block + sizeof(Allocation) + alignment - 1) & -(intptr_t)alignment);
		// Record the unaligned base so deallocate() can delete[] it.
		Allocation *allocation = (Allocation*)(aligned - sizeof(Allocation));

		// allocation->bytes = bytes;
		allocation->block = block;
	}

	return aligned;
#endif
}
84
85 #if defined(LINUX_ENABLE_NAMED_MMAP)
// Create a file descriptor for anonymous memory with the given
// name. Returns -1 on failure.
// TODO: remove once libc wrapper exists.
int memfd_create(const char* name, unsigned int flags)
{
	// Per-architecture syscall numbers for memfd_create (Linux >= 3.17).
#if __aarch64__
#define __NR_memfd_create 279
#elif __arm__
	// 32-bit ARM (EABI) uses 385; 279 is the AArch64 number and would
	// invoke an unrelated syscall here.
#define __NR_memfd_create 385
#elif __powerpc64__
#define __NR_memfd_create 360
#elif __i386__
#define __NR_memfd_create 356
#elif __x86_64__
#define __NR_memfd_create 319
#endif /* __NR_memfd_create */
#ifdef __NR_memfd_create
	// In the event of no system call this returns -1 with errno set
	// as ENOSYS.
	return syscall(__NR_memfd_create, name, flags);
#else
	return -1;
#endif
}
110
// Returns a file descriptor for use with an anonymous mmap, if
// memfd_create fails, -1 is returned. Note, the mappings should be
// MAP_PRIVATE so that underlying pages aren't shared.
int anonymousFd()
{
	// Created once and intentionally never closed: the same descriptor
	// backs all executable mappings for the lifetime of the process.
	static int fd = memfd_create("SwiftShader JIT", 0);
	return fd;
}
119
// Ensure there is enough space in the "anonymous" fd for length.
// The backing file only ever grows; it is never shrunk.
void ensureAnonFileSize(int anonFd, size_t length)
{
	// Cached high-water mark of the file size. NOTE(review): not
	// thread-safe; assumes callers are serialized — confirm against callers.
	static size_t fileSize = 0;
	if(length > fileSize)
	{
		// Only remember the new size if the resize actually succeeded, so
		// that a transient ftruncate() failure is retried on the next call
		// instead of leaving later mmaps backed by a too-short file.
		if(ftruncate(anonFd, length) == 0)
		{
			fileSize = length;
		}
	}
}
130 #endif // defined(LINUX_ENABLE_NAMED_MMAP)
131
132 } // anonymous namespace
133
// Returns the system's virtual-memory page size in bytes. The value is
// queried from the OS once and cached for all subsequent calls.
size_t memoryPageSize()
{
	static int pageSize = []() -> int {
#if defined(_WIN32)
		SYSTEM_INFO systemInfo;
		GetSystemInfo(&systemInfo);
		return systemInfo.dwPageSize;
#else
		return sysconf(_SC_PAGESIZE);
#endif
	}();

	return pageSize;
}
151
allocate(size_t bytes,size_t alignment)152 void *allocate(size_t bytes, size_t alignment)
153 {
154 void *memory = allocateRaw(bytes, alignment);
155
156 if(memory)
157 {
158 memset(memory, 0, bytes);
159 }
160
161 return memory;
162 }
163
deallocate(void * memory)164 void deallocate(void *memory)
165 {
166 #if defined(LINUX_ENABLE_NAMED_MMAP)
167 free(memory);
168 #else
169 if(memory)
170 {
171 unsigned char *aligned = (unsigned char*)memory;
172 Allocation *allocation = (Allocation*)(aligned - sizeof(Allocation));
173
174 delete[] allocation->block;
175 }
176 #endif
177 }
178
179 // Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
roundUp(uintptr_t x,uintptr_t m)180 inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
181 {
182 ASSERT(m > 0 && (m & (m - 1)) == 0); // |m| must be a power of 2.
183 return (x + m - 1) & ~(m - 1);
184 }
185
// Reserves a page-rounded region of at least |bytes| bytes intended to
// hold JIT-compiled code. The returned memory is readable and writable;
// markExecutable() must be called before executing from it. Returns
// nullptr on failure.
void *allocateExecutable(size_t bytes)
{
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	void *mapping;

#if defined(LINUX_ENABLE_NAMED_MMAP)
	// Try to name the memory region for the executable code,
	// to aid profilers.
	int anonFd = anonymousFd();
	if(anonFd == -1)
	{
		// memfd_create() unavailable: fall back to a plain anonymous mapping.
		mapping = mmap(nullptr, length, PROT_READ | PROT_WRITE,
		               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	}
	else
	{
		// Grow the shared anonymous file, then map a private view of it so
		// pages are copy-on-write and not shared between mappings.
		ensureAnonFileSize(anonFd, length);
		mapping = mmap(nullptr, length, PROT_READ | PROT_WRITE,
		               MAP_PRIVATE, anonFd, 0);
	}

	// Normalize mmap's failure sentinel to nullptr for callers.
	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#elif defined(__Fuchsia__)
	// Create a VMO and swap its handle for one with execute rights, as
	// required before the region can later be protected as executable.
	zx_handle_t vmo;
	if (zx_vmo_create(length, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) {
		return nullptr;
	}
	if (zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK) {
		// NOTE(review): |vmo| is not closed here — presumably the replace
		// call invalidates the handle even on failure; confirm against the
		// Zircon documentation.
		return nullptr;
	}
	zx_vaddr_t reservation;
	zx_status_t status = zx_vmar_map(
		zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
		0, vmo, 0, length, &reservation);
	// The mapping keeps the VMO's pages alive; the handle itself is no
	// longer needed.
	zx_handle_close(vmo);
	if (status != ZX_OK) {
		return nullptr;
	}

	zx_vaddr_t alignedReservation = roundUp(reservation, pageSize);
	mapping = reinterpret_cast<void*>(alignedReservation);

	// Unmap extra memory reserved before the block.
	if (alignedReservation != reservation) {
		size_t prefix_size = alignedReservation - reservation;
		status =
			zx_vmar_unmap(zx_vmar_root_self(), reservation, prefix_size);
		ASSERT(status == ZX_OK);
		length -= prefix_size;
	}

	// Unmap extra memory at the end.
	// NOTE(review): the start address alignedReservation + bytes is only
	// page-aligned when |bytes| is a multiple of the page size — verify
	// this unmap succeeds for unaligned |bytes|.
	if (length > bytes) {
		status = zx_vmar_unmap(
			zx_vmar_root_self(), alignedReservation + bytes,
			length - bytes);
		ASSERT(status == ZX_OK);
	}
#else
	// Generic path: a page-aligned, zeroed heap allocation that
	// markExecutable() will mprotect()/VirtualProtect() later.
	mapping = allocate(length, pageSize);
#endif

	return mapping;
}
254
// Changes the protection of |bytes| bytes at |memory| to read + execute
// (no longer writable). |memory| must be page-aligned memory obtained
// from allocateExecutable().
void markExecutable(void *memory, size_t bytes)
{
#if defined(_WIN32)
	unsigned long oldProtection;
	VirtualProtect(memory, bytes, PAGE_EXECUTE_READ, &oldProtection);
#elif defined(__Fuchsia__)
	zx_status_t status = zx_vmar_protect(
		zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE,
		reinterpret_cast<zx_vaddr_t>(memory), bytes);
	// Fixed inverted assertion: the original asserted status != ZX_OK,
	// i.e. that the protect call *failed*. Sibling Fuchsia code in this
	// file asserts == ZX_OK.
	ASSERT(status == ZX_OK);
#else
	mprotect(memory, bytes, PROT_READ | PROT_EXEC);
#endif
}
269
// Releases |bytes| bytes at |memory| previously obtained from
// allocateExecutable(), restoring normal read/write protection first
// where the underlying allocator requires it.
void deallocateExecutable(void *memory, size_t bytes)
{
#if defined(_WIN32)
	// Make the pages writable again before returning them to the heap.
	unsigned long oldProtection;
	VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
	deallocate(memory);
#elif defined(LINUX_ENABLE_NAMED_MMAP)
	// The region was mapped with a page-rounded length, so round |bytes|
	// up the same way before unmapping. (Same rounding as roundUp().)
	size_t pageSize = memoryPageSize();
	size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
	munmap(memory, length);
#elif defined(__Fuchsia__)
	zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory),
	              bytes);
#else
	// Generic path: undo markExecutable()'s protection change, then free
	// through the matching heap allocator.
	mprotect(memory, bytes, PROT_READ | PROT_WRITE);
	deallocate(memory);
#endif
}
288 }
289