// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ExecutableMemory.hpp"

#include "Debug.hpp"

#if defined(_WIN32)
#	ifndef WIN32_LEAN_AND_MEAN
#		define WIN32_LEAN_AND_MEAN
#	endif
#	include <windows.h>
#	include <intrin.h>
#elif defined(__Fuchsia__)
#	include <unistd.h>
#	include <zircon/process.h>
#	include <zircon/syscalls.h>
#else
#	include <errno.h>
#	include <sys/mman.h>
#	include <stdlib.h>
#	include <unistd.h>
#endif

#include <memory.h>

#undef allocate
#undef deallocate

#if(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)) && !defined(__x86__)
#	define __x86__
#endif

namespace rr {
namespace {

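// Bookkeeping record stored immediately before each pointer returned by
// allocateRaw() (non-mmap path), so that deallocate() can recover the
// original unaligned block.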
struct Allocation
{
	//	size_t bytes;
	unsigned char *block;
};

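// Allocates |bytes| bytes with the given power-of-two |alignment|. With
// LINUX_ENABLE_NAMED_MMAP this defers to malloc()/posix_memalign(); otherwise
// it over-allocates with new[] and stores an Allocation record just below the
// returned address.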
void *allocateRaw(size_t bytes, size_t alignment)
{
	ASSERT((alignment & (alignment - 1)) == 0);  // Power of 2 alignment.

#if defined(LINUX_ENABLE_NAMED_MMAP)
	if(alignment < sizeof(void *))
	{
		return malloc(bytes);
	}
	else
	{
		void *allocation;
		int result = posix_memalign(&allocation, alignment, bytes);
		if(result != 0)
		{
			errno = result;
			allocation = nullptr;
		}
		return allocation;
	}
#else
	unsigned char *block = new unsigned char[bytes + sizeof(Allocation) + alignment];
	unsigned char *aligned = nullptr;

	if(block)
	{
		aligned = (unsigned char *)((uintptr_t)(block + sizeof(Allocation) + alignment - 1) & -(intptr_t)alignment);
		Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));

		//	allocation->bytes = bytes;
		allocation->block = block;
	}

	return aligned;
#endif
}

#if defined(_WIN32)
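// Translates PERMISSION_* flags into the corresponding Win32 PAGE_* protection
// constant. Unsupported combinations map to PAGE_NOACCESS.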
DWORD permissionsToProtectMode(int permissions)
{
	switch(permissions)
	{
		case PERMISSION_READ:
			return PAGE_READONLY;
		case PERMISSION_EXECUTE:
			return PAGE_EXECUTE;
		case PERMISSION_READ | PERMISSION_WRITE:
			return PAGE_READWRITE;
		case PERMISSION_READ | PERMISSION_EXECUTE:
			return PAGE_EXECUTE_READ;
		case PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE:
			return PAGE_EXECUTE_READWRITE;
	}
	return PAGE_NOACCESS;
}
#endif

#if !defined(_WIN32) && !defined(__Fuchsia__)
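// Translates PERMISSION_* flags into POSIX PROT_* bits for mmap()/mprotect().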
int permissionsToMmapProt(int permissions)
{
	int result = 0;
	if(permissions & PERMISSION_READ)
	{
		result |= PROT_READ;
	}
	if(permissions & PERMISSION_WRITE)
	{
		result |= PROT_WRITE;
	}
	if(permissions & PERMISSION_EXECUTE)
	{
		result |= PROT_EXEC;
	}
	return result;
}
#endif  // !defined(_WIN32) && !defined(__Fuchsia__)

#if defined(LINUX_ENABLE_NAMED_MMAP)
// Create a file descriptor for anonymous memory with the given
// name. Returns -1 on failure.
// TODO: remove once libc wrapper exists.
int memfd_create(const char *name, unsigned int flags)
{
#	if __aarch64__
#		define __NR_memfd_create 279
#	elif __arm__
#		define __NR_memfd_create 385  // 32-bit ARM (EABI) syscall number.
#	elif __powerpc64__
#		define __NR_memfd_create 360
#	elif __i386__
#		define __NR_memfd_create 356
#	elif __x86_64__
#		define __NR_memfd_create 319
#	endif /* __NR_memfd_create__ */
#	ifdef __NR_memfd_create
	// If the system call is unavailable, this returns -1 with errno
	// set to ENOSYS.
	return syscall(__NR_memfd_create, name, flags);
#	else
	return -1;
#	endif
}

// Returns a file descriptor for use with an anonymous mmap; if
// memfd_create fails, -1 is returned. Note that the mappings should be
// MAP_PRIVATE so that underlying pages aren't shared.
int anonymousFd()
{
	static int fd = memfd_create("SwiftShader JIT", 0);
	return fd;
}

// Ensures there is enough space in the "anonymous" fd for |length| bytes.
void ensureAnonFileSize(int anonFd, size_t length)
{
	static size_t fileSize = 0;
	if(length > fileSize)
	{
		ftruncate(anonFd, length);
		fileSize = length;
	}
}
#endif  // defined(LINUX_ENABLE_NAMED_MMAP)

#if defined(__Fuchsia__)
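// Translates PERMISSION_* flags into Zircon ZX_VM_PERM_* options for
// zx_vmar_map()/zx_vmar_protect().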
zx_vm_option_t permissionsToZxVmOptions(int permissions)
{
	zx_vm_option_t result = 0;
	if(permissions & PERMISSION_READ)
	{
		result |= ZX_VM_PERM_READ;
	}
	if(permissions & PERMISSION_WRITE)
	{
		result |= ZX_VM_PERM_WRITE;
	}
	if(permissions & PERMISSION_EXECUTE)
	{
		result |= ZX_VM_PERM_EXECUTE;
	}
	return result;
}
#endif  // defined(__Fuchsia__)

}  // anonymous namespace

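// Returns the system's page size, queried once and then cached.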
size_t memoryPageSize()
{
	static int pageSize = 0;

	if(pageSize == 0)
	{
#if defined(_WIN32)
		SYSTEM_INFO systemInfo;
		GetSystemInfo(&systemInfo);
		pageSize = systemInfo.dwPageSize;
#else
		pageSize = sysconf(_SC_PAGESIZE);
#endif
	}

	return pageSize;
}

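// Allocates |bytes| zero-initialized bytes with the given |alignment|.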
void *allocate(size_t bytes, size_t alignment)
{
	void *memory = allocateRaw(bytes, alignment);

	if(memory)
	{
		memset(memory, 0, bytes);
	}

	return memory;
}

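// Releases memory obtained from allocate()/allocateRaw().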
void deallocate(void *memory)
{
#if defined(LINUX_ENABLE_NAMED_MMAP)
	free(memory);
#else
	if(memory)
	{
		unsigned char *aligned = (unsigned char *)memory;
		Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));

		delete[] allocation->block;
	}
#endif
}

// Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
{
	ASSERT(m > 0 && (m & (m - 1)) == 0);  // |m| must be a power of 2.
	return (x + m - 1) & ~(m - 1);
}

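// Reserves a whole number of pages covering |bytes| bytes and applies the
// requested PERMISSION_* flags. |need_exec| indicates that the mapping must be
// able to become executable later; it currently only matters on Fuchsia, where
// the backing VMO is replaced with an executable one. Illustrative usage for
// JIT code:
//
//   void *code = allocateMemoryPages(size, PERMISSION_READ | PERMISSION_WRITE, true);
//   // ... emit instructions into |code| ...
//   protectMemoryPages(code, size, PERMISSION_READ | PERMISSION_EXECUTE);
//   // ... execute ...
//   deallocateMemoryPages(code, size);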
void *allocateMemoryPages(size_t bytes, int permissions, bool need_exec)
{
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	void *mapping = nullptr;

#if defined(LINUX_ENABLE_NAMED_MMAP)
	int flags = MAP_PRIVATE;

	// Try to name the memory region for the executable code,
	// to aid profilers.
	int anonFd = anonymousFd();
	if(anonFd == -1)
	{
		flags |= MAP_ANONYMOUS;
	}
	else
	{
		ensureAnonFileSize(anonFd, length);
	}

	mapping = mmap(
	    nullptr, length, permissionsToMmapProt(permissions), flags, anonFd, 0);

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#elif defined(__Fuchsia__)
	zx_handle_t vmo;
	if(zx_vmo_create(length, 0, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	if(need_exec &&
	   zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	zx_vaddr_t reservation;
	zx_status_t status = zx_vmar_map(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions), 0, vmo,
	    0, length, &reservation);
	zx_handle_close(vmo);
	if(status != ZX_OK)
	{
		return nullptr;
	}

	// zx_vmar_map() returns a page-aligned address.
	ASSERT(roundUp(reservation, pageSize) == reservation);

	mapping = reinterpret_cast<void *>(reservation);
#elif defined(__APPLE__)
	int prot = permissionsToMmapProt(permissions);
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
	// On macOS 10.14 and higher, executables that are code signed with the
	// "runtime" option cannot execute writable memory by default. They can opt
	// into this capability by specifying the "com.apple.security.cs.allow-jit"
	// code signing entitlement and allocating the region with the MAP_JIT flag.
	mapping = mmap(nullptr, length, prot, flags | MAP_JIT, -1, 0);

	if(mapping == MAP_FAILED)
	{
		// Retry without MAP_JIT (for older macOS versions).
		mapping = mmap(nullptr, length, prot, flags, -1, 0);
	}

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#else
	mapping = allocate(length, pageSize);
	protectMemoryPages(mapping, length, permissions);
#endif

	return mapping;
}

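// Changes the protection of the pages starting at |memory| to |permissions|.
// |bytes| is rounded up to a whole number of pages.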
void protectMemoryPages(void *memory, size_t bytes, int permissions)
{
	if(bytes == 0)
		return;
	bytes = roundUp(bytes, memoryPageSize());

#if defined(_WIN32)
	unsigned long oldProtection;
	BOOL result =
	    VirtualProtect(memory, bytes, permissionsToProtectMode(permissions),
	                   &oldProtection);
	ASSERT(result);
#elif defined(__Fuchsia__)
	zx_status_t status = zx_vmar_protect(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions),
	    reinterpret_cast<zx_vaddr_t>(memory), bytes);
	ASSERT(status == ZX_OK);
#else
	int result =
	    mprotect(memory, bytes, permissionsToMmapProt(permissions));
	ASSERT(result == 0);
#endif
}

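// Releases pages obtained from allocateMemoryPages(). Pages that came from the
// generic allocator (Windows and the fallback path) are first restored to
// read/write before being freed; mmap- and VMO-backed pages are unmapped.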
void deallocateMemoryPages(void *memory, size_t bytes)
{
#if defined(_WIN32)
	unsigned long oldProtection;
	BOOL result =
	    VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
	ASSERT(result);
	deallocate(memory);
#elif defined(LINUX_ENABLE_NAMED_MMAP) || defined(__APPLE__)
	size_t pageSize = memoryPageSize();
	size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
	int result = munmap(memory, length);
	ASSERT(result == 0);
#elif defined(__Fuchsia__)
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	zx_status_t status = zx_vmar_unmap(
	    zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
	ASSERT(status == ZX_OK);
#else
	int result = mprotect(memory, bytes, PROT_READ | PROT_WRITE);
	ASSERT(result == 0);
	deallocate(memory);
#endif
}

}  // namespace rr