// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ExecutableMemory.hpp"

#include "Debug.hpp"

#if defined(_WIN32)
#	ifndef WIN32_LEAN_AND_MEAN
#		define WIN32_LEAN_AND_MEAN
#	endif
#	include <Windows.h>
#	include <intrin.h>
#elif defined(__Fuchsia__)
#	include <unistd.h>
#	include <zircon/process.h>
#	include <zircon/syscalls.h>
#else
#	include <errno.h>
#	include <sys/mman.h>
#	include <stdlib.h>
#	include <unistd.h>
#endif

#if defined(__ANDROID__) && !defined(ANDROID_HOST_BUILD) && !defined(ANDROID_NDK_BUILD)
#	include <sys/prctl.h>
#endif

#include <memory.h>

#undef allocate
#undef deallocate

#if(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)) && !defined(__x86__)
#	define __x86__
#endif

#define STRINGIFY(x) #x
#define MACRO_STRINGIFY(x) STRINGIFY(x)
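// MACRO_STRINGIFY expands its argument before stringifying it, so
// MACRO_STRINGIFY(REACTOR_ANONYMOUS_MMAP_NAME) yields the macro's value as a
// string literal rather than the literal text "REACTOR_ANONYMOUS_MMAP_NAME".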

namespace rr {
namespace {

struct Allocation
{
	//	size_t bytes;
	unsigned char *block;
};

void *allocateRaw(size_t bytes, size_t alignment)
{
	ASSERT((alignment & (alignment - 1)) == 0);  // Power of 2 alignment.

#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
	if(alignment < sizeof(void *))
	{
		return malloc(bytes);
	}
	else
	{
		void *allocation;
		int result = posix_memalign(&allocation, alignment, bytes);
		if(result != 0)
		{
			errno = result;
			allocation = nullptr;
		}
		return allocation;
	}
#else
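	// Fallback: over-allocate so that an aligned pointer can be carved out of
	// the block, and stash the original block pointer in an Allocation header
	// placed immediately before the aligned address for use by deallocate().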
	unsigned char *block = new unsigned char[bytes + sizeof(Allocation) + alignment];
	unsigned char *aligned = nullptr;

	if(block)
	{
		aligned = (unsigned char *)((uintptr_t)(block + sizeof(Allocation) + alignment - 1) & -(intptr_t)alignment);
		Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));

		//	allocation->bytes = bytes;
		allocation->block = block;
	}

	return aligned;
#endif
}

#if defined(_WIN32)
DWORD permissionsToProtectMode(int permissions)
{
	switch(permissions)
	{
		case PERMISSION_READ:
			return PAGE_READONLY;
		case PERMISSION_EXECUTE:
			return PAGE_EXECUTE;
		case PERMISSION_READ | PERMISSION_WRITE:
			return PAGE_READWRITE;
		case PERMISSION_READ | PERMISSION_EXECUTE:
			return PAGE_EXECUTE_READ;
		case PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE:
			return PAGE_EXECUTE_READWRITE;
	}
	return PAGE_NOACCESS;
}
#endif

#if !defined(_WIN32) && !defined(__Fuchsia__)
int permissionsToMmapProt(int permissions)
{
	int result = 0;
	if(permissions & PERMISSION_READ)
	{
		result |= PROT_READ;
	}
	if(permissions & PERMISSION_WRITE)
	{
		result |= PROT_WRITE;
	}
	if(permissions & PERMISSION_EXECUTE)
	{
		result |= PROT_EXEC;
	}
	return result;
}
#endif  // !defined(_WIN32) && !defined(__Fuchsia__)

#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
#	if !defined(__ANDROID__) || defined(ANDROID_HOST_BUILD) || defined(ANDROID_NDK_BUILD)
// Create a file descriptor for anonymous memory with the given
// name. Returns -1 on failure.
// TODO: remove once libc wrapper exists.
static int memfd_create(const char *name, unsigned int flags)
{
#		if __aarch64__
#			define __NR_memfd_create 279
#		elif __arm__
#			define __NR_memfd_create 385  // 32-bit ARM EABI (AArch64 uses 279 above).
#		elif __powerpc64__
#			define __NR_memfd_create 360
#		elif __i386__
#			define __NR_memfd_create 356
#		elif __x86_64__
#			define __NR_memfd_create 319
#		endif /* __NR_memfd_create */
#		ifdef __NR_memfd_create
	// If the kernel does not support the system call, this returns -1 with
	// errno set to ENOSYS.
	return syscall(__NR_memfd_create, name, flags);
#		else
	return -1;
#		endif
}

// Returns a file descriptor for use with an anonymous mmap; if memfd_create
// fails, -1 is returned. Note that the mappings should be MAP_PRIVATE so that
// the underlying pages aren't shared.
int anonymousFd()
{
	static int fd = memfd_create(MACRO_STRINGIFY(REACTOR_ANONYMOUS_MMAP_NAME), 0);
	return fd;
}
#	else   // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD
int anonymousFd()
{
	return -1;
}
#	endif  // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD

// Ensure there is enough space in the "anonymous" fd for length.
void ensureAnonFileSize(int anonFd, size_t length)
{
	static size_t fileSize = 0;
	if(length > fileSize)
	{
		[[maybe_unused]] int result = ftruncate(anonFd, length);
		ASSERT(result == 0);
		fileSize = length;
	}
}
#endif  // defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)

#if defined(__Fuchsia__)
zx_vm_option_t permissionsToZxVmOptions(int permissions)
{
	zx_vm_option_t result = 0;
	if(permissions & PERMISSION_READ)
	{
		result |= ZX_VM_PERM_READ;
	}
	if(permissions & PERMISSION_WRITE)
	{
		result |= ZX_VM_PERM_WRITE;
	}
	if(permissions & PERMISSION_EXECUTE)
	{
		result |= ZX_VM_PERM_EXECUTE;
	}
	return result;
}
#endif  // defined(__Fuchsia__)

}  // anonymous namespace

size_t memoryPageSize()
{
	static int pageSize = [] {
#if defined(_WIN32)
		SYSTEM_INFO systemInfo;
		GetSystemInfo(&systemInfo);
		return systemInfo.dwPageSize;
#else
		return sysconf(_SC_PAGESIZE);
#endif
	}();

	return pageSize;
}

void *allocate(size_t bytes, size_t alignment)
{
	void *memory = allocateRaw(bytes, alignment);

	if(memory)
	{
		memset(memory, 0, bytes);
	}

	return memory;
}

void deallocate(void *memory)
{
#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
	free(memory);
#else
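	// Recover the Allocation header stored just below the aligned pointer by
	// allocateRaw(), then free the original over-allocated block.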
	if(memory)
	{
		unsigned char *aligned = (unsigned char *)memory;
		Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));

		delete[] allocation->block;
	}
#endif
}

// Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
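// For example, roundUp(5000, 4096) == 8192.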
inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
{
	ASSERT(m > 0 && (m & (m - 1)) == 0);  // |m| must be a power of 2.
	return (x + m - 1) & ~(m - 1);
}

void *allocateMemoryPages(size_t bytes, int permissions, bool need_exec)
{
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	void *mapping = nullptr;

#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
	int flags = MAP_PRIVATE;

	// Try to name the memory region for the executable code,
	// to aid profilers.
	int anonFd = anonymousFd();
	if(anonFd == -1)
	{
		flags |= MAP_ANONYMOUS;
	}
	else
	{
		ensureAnonFileSize(anonFd, length);
	}

	mapping = mmap(
	    nullptr, length, permissionsToMmapProt(permissions), flags, anonFd, 0);

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#	if defined(__ANDROID__) && !defined(ANDROID_HOST_BUILD) && !defined(ANDROID_NDK_BUILD)
	else
	{
		// On Android, prefer to use a non-standard prctl called
		// PR_SET_VMA_ANON_NAME to set the name of a private anonymous
		// mapping, as Android restricts EXECUTE permission on
		// CoW/shared anonymous mappings with sepolicy neverallows.
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, mapping, length,
		      MACRO_STRINGIFY(REACTOR_ANONYMOUS_MMAP_NAME));
	}
#	endif  // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD
#elif defined(__Fuchsia__)
	zx_handle_t vmo;
	if(zx_vmo_create(length, 0, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	if(need_exec &&
	   zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	zx_vaddr_t reservation;
	zx_status_t status = zx_vmar_map(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions), 0, vmo,
	    0, length, &reservation);
	zx_handle_close(vmo);
	if(status != ZX_OK)
	{
		return nullptr;
	}

	// zx_vmar_map() returns a page-aligned address.
	ASSERT(roundUp(reservation, pageSize) == reservation);

	mapping = reinterpret_cast<void *>(reservation);
#elif defined(__APPLE__)
	int prot = permissionsToMmapProt(permissions);
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
	// On macOS 10.14 and higher, executables that are code signed with the
	// "runtime" option cannot execute writable memory by default. They can opt
	// into this capability by specifying the "com.apple.security.cs.allow-jit"
	// code signing entitlement and allocating the region with the MAP_JIT flag.
	mapping = mmap(nullptr, length, prot, flags | MAP_JIT, -1, 0);

	if(mapping == MAP_FAILED)
	{
		// Retry without MAP_JIT (for older macOS versions).
		mapping = mmap(nullptr, length, prot, flags, -1, 0);
	}

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#else
	mapping = allocate(length, pageSize);
	protectMemoryPages(mapping, length, permissions);
#endif

	return mapping;
}

void protectMemoryPages(void *memory, size_t bytes, int permissions)
{
	if(bytes == 0)
	{
		return;
	}

	bytes = roundUp(bytes, memoryPageSize());

#if defined(_WIN32)
	unsigned long oldProtection;
	BOOL result =
	    VirtualProtect(memory, bytes, permissionsToProtectMode(permissions),
	                   &oldProtection);
	ASSERT(result);
#elif defined(__Fuchsia__)
	zx_status_t status = zx_vmar_protect(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions),
	    reinterpret_cast<zx_vaddr_t>(memory), bytes);
	ASSERT(status == ZX_OK);
#else
	int result =
	    mprotect(memory, bytes, permissionsToMmapProt(permissions));
	ASSERT(result == 0);
#endif
}
381 
deallocateMemoryPages(void * memory,size_t bytes)382 void deallocateMemoryPages(void *memory, size_t bytes)
383 {
384 #if defined(_WIN32)
385 	unsigned long oldProtection;
386 	BOOL result =
387 	    VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
388 	ASSERT(result);
389 	deallocate(memory);
390 #elif defined(__APPLE__) || (defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME))
391 	size_t pageSize = memoryPageSize();
392 	size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
393 	int result = munmap(memory, length);
394 	ASSERT(result == 0);
395 #elif defined(__Fuchsia__)
396 	size_t pageSize = memoryPageSize();
397 	size_t length = roundUp(bytes, pageSize);
398 	zx_status_t status = zx_vmar_unmap(
399 	    zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
400 	ASSERT(status == ZX_OK);
401 #else
402 	int result = mprotect(memory, bytes, PROT_READ | PROT_WRITE);
403 	ASSERT(result == 0);
404 	deallocate(memory);
405 #endif
406 }
407 
408 }  // namespace rr
409
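// Illustrative usage sketch (not part of the original file): how a caller might
// combine these helpers to run generated code. `code` and `codeSize` are
// hypothetical placeholders for a buffer of machine code produced elsewhere.
//
//   void *pages = rr::allocateMemoryPages(
//       codeSize, rr::PERMISSION_READ | rr::PERMISSION_WRITE, true);
//   memcpy(pages, code, codeSize);                      // fill while writable
//   rr::protectMemoryPages(pages, codeSize,
//                          rr::PERMISSION_READ | rr::PERMISSION_EXECUTE);
//   reinterpret_cast<void (*)()>(pages)();              // execute
//   rr::deallocateMemoryPages(pages, codeSize);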