• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2016 The SwiftShader Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //    http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "ExecutableMemory.hpp"
16 
17 #include "Debug.hpp"
18 
19 #if defined(_WIN32)
20 #	ifndef WIN32_LEAN_AND_MEAN
21 #		define WIN32_LEAN_AND_MEAN
22 #	endif
23 #	include <Windows.h>
24 #	include <intrin.h>
25 #elif defined(__Fuchsia__)
26 #	include <unistd.h>
27 #	include <zircon/process.h>
28 #	include <zircon/syscalls.h>
29 #else
30 #	include <errno.h>
31 #	include <sys/mman.h>
32 #	include <stdlib.h>
33 #	include <unistd.h>
34 #endif
35 
36 #if defined(__ANDROID__) && !defined(ANDROID_HOST_BUILD) && !defined(ANDROID_NDK_BUILD)
37 #	include <sys/prctl.h>
38 #endif
39 
40 #include <memory.h>
41 
42 #undef allocate
43 #undef deallocate
44 
45 #if(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)) && !defined(__x86__)
46 #	define __x86__
47 #endif
48 
49 #define STRINGIFY(x) #x
50 #define MACRO_STRINGIFY(x) STRINGIFY(x)
51 
52 // A Clang extension to determine compiler features.
53 // We use it to detect Sanitizer builds (e.g. -fsanitize=memory).
54 #ifndef __has_feature
55 #	define __has_feature(x) 0
56 #endif
57 
58 namespace rr {
59 namespace {
60 
// Header stored immediately before each pointer returned by allocateRaw()
// (non-mmap configuration), so deallocate() can recover the original
// over-allocated block for delete[].
struct Allocation
{
	// size_t bytes;
	unsigned char *block;  // Start of the raw block returned by new[].
};
66 
// Allocates 'bytes' of uninitialized memory aligned to 'alignment' bytes.
// 'alignment' must be a power of two. Returns nullptr on failure.
// The returned pointer must be released with deallocate().
void *allocateRaw(size_t bytes, size_t alignment)
{
	ASSERT((alignment & (alignment - 1)) == 0);  // Power of 2 alignment.

#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
	// In this configuration deallocate() calls free(), so the pointer
	// handed out here must come straight from the C allocator.
	if(alignment < sizeof(void *))
	{
		// malloc() already guarantees at least pointer alignment.
		return malloc(bytes);
	}
	else
	{
		void *allocation;
		int result = posix_memalign(&allocation, alignment, bytes);
		if(result != 0)
		{
			// posix_memalign() reports errors via its return value
			// without setting errno; propagate it to errno here.
			errno = result;
			allocation = nullptr;
		}
		return allocation;
	}
#else
	// Over-allocate so we can (a) align the returned pointer and (b) stash
	// an Allocation header just before it, recording the raw block pointer
	// that deallocate() needs for delete[].
	unsigned char *block = new unsigned char[bytes + sizeof(Allocation) + alignment];
	unsigned char *aligned = nullptr;

	if(block)
	{
		// Advance past the header, then round up to the next
		// 'alignment' boundary.
		aligned = (unsigned char *)((uintptr_t)(block + sizeof(Allocation) + alignment - 1) & -(intptr_t)alignment);
		Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));

		// allocation->bytes = bytes;
		allocation->block = block;
	}

	return aligned;
#endif
}
103 
104 #if defined(_WIN32)
permissionsToProtectMode(int permissions)105 DWORD permissionsToProtectMode(int permissions)
106 {
107 	switch(permissions)
108 	{
109 	case PERMISSION_READ:
110 		return PAGE_READONLY;
111 	case PERMISSION_EXECUTE:
112 		return PAGE_EXECUTE;
113 	case PERMISSION_READ | PERMISSION_WRITE:
114 		return PAGE_READWRITE;
115 	case PERMISSION_READ | PERMISSION_EXECUTE:
116 		return PAGE_EXECUTE_READ;
117 	case PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE:
118 		return PAGE_EXECUTE_READWRITE;
119 	}
120 	return PAGE_NOACCESS;
121 }
122 #endif
123 
124 #if !defined(_WIN32) && !defined(__Fuchsia__)
permissionsToMmapProt(int permissions)125 int permissionsToMmapProt(int permissions)
126 {
127 	int result = 0;
128 	if(permissions & PERMISSION_READ)
129 	{
130 		result |= PROT_READ;
131 	}
132 	if(permissions & PERMISSION_WRITE)
133 	{
134 		result |= PROT_WRITE;
135 	}
136 	if(permissions & PERMISSION_EXECUTE)
137 	{
138 		result |= PROT_EXEC;
139 	}
140 	return result;
141 }
142 #endif  // !defined(_WIN32) && !defined(__Fuchsia__)
143 
144 #if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
145 #	if !defined(__ANDROID__) || defined(ANDROID_HOST_BUILD) || defined(ANDROID_NDK_BUILD)
// Create a file descriptor for anonymous memory with the given
// name. Returns -1 on failure.
// TODO: remove once libc wrapper exists.
static int memfd_create(const char *name, unsigned int flags)
{
	// Hard-coded syscall numbers, since older libc headers do not expose
	// __NR_memfd_create on every architecture.
#		if __aarch64__
#			define __NR_memfd_create 279
#		elif __arm__
	// 32-bit ARM EABI uses 385; 279 is the AArch64 number.
#			define __NR_memfd_create 385
#		elif __powerpc64__
#			define __NR_memfd_create 360
#		elif __i386__
#			define __NR_memfd_create 356
#		elif __x86_64__
#			define __NR_memfd_create 319
#		endif /* __NR_memfd_create */
#		ifdef __NR_memfd_create
	// In the event of no system call this returns -1 with errno set
	// as ENOSYS.
	return syscall(__NR_memfd_create, name, flags);
#		else
	// Unknown architecture: report failure so callers fall back to
	// anonymous mappings.
	return -1;
#		endif
}
170 
// Returns a file descriptor for use with an anonymous mmap, if
// memfd_create fails, -1 is returned. Note, the mappings should be
// MAP_PRIVATE so that underlying pages aren't shared.
// The descriptor is created once and cached for the lifetime of the
// process; it is intentionally never closed.
int anonymousFd()
{
	static int fd = memfd_create(MACRO_STRINGIFY(REACTOR_ANONYMOUS_MMAP_NAME), 0);
	return fd;
}
179 #	else   // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD
// Android in-tree builds: always report "no anonymous fd", so callers
// fall back to plain MAP_ANONYMOUS mappings.
int anonymousFd()
{
	return -1;
}
184 #	endif  // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD
185 
// Ensure there is enough space in the "anonymous" fd for length.
// The file is only ever grown; 'fileSize' tracks the high-water mark.
// NOTE(review): the static fileSize is unsynchronized — confirm callers
// serialize allocation if this can run on multiple threads.
void ensureAnonFileSize(int anonFd, size_t length)
{
	static size_t fileSize = 0;
	if(length > fileSize)
	{
		[[maybe_unused]] int result = ftruncate(anonFd, length);
		ASSERT(result == 0);
		fileSize = length;
	}
}
197 #endif  // defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
198 
199 #if defined(__Fuchsia__)
permissionsToZxVmOptions(int permissions)200 zx_vm_option_t permissionsToZxVmOptions(int permissions)
201 {
202 	zx_vm_option_t result = 0;
203 	if(permissions & PERMISSION_READ)
204 	{
205 		result |= ZX_VM_PERM_READ;
206 	}
207 	if(permissions & PERMISSION_WRITE)
208 	{
209 		result |= ZX_VM_PERM_WRITE;
210 	}
211 	if(permissions & PERMISSION_EXECUTE)
212 	{
213 		result |= ZX_VM_PERM_EXECUTE;
214 	}
215 	return result;
216 }
217 #endif  // defined(__Fuchsia__)
218 
219 }  // anonymous namespace
220 
// Returns the system's memory page size, in bytes. The value is queried
// once and cached (C++11 guarantees thread-safe initialization of the
// function-local static).
size_t memoryPageSize()
{
	// Cache as size_t to match the return type and avoid narrowing the
	// platform's native result (DWORD on Windows, long from sysconf)
	// into an int.
	static const size_t pageSize = [] {
#if defined(_WIN32)
		SYSTEM_INFO systemInfo;
		GetSystemInfo(&systemInfo);
		return static_cast<size_t>(systemInfo.dwPageSize);
#else
		return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
	}();

	return pageSize;
}
235 
allocate(size_t bytes,size_t alignment)236 void *allocate(size_t bytes, size_t alignment)
237 {
238 	void *memory = allocateRaw(bytes, alignment);
239 
240 	// Zero-initialize the memory, for security reasons.
241 	// MemorySanitizer builds skip this so that we can detect when we
242 	// inadvertently rely on this, which would indicate a bug.
243 	if(memory && !__has_feature(memory_sanitizer))
244 	{
245 		memset(memory, 0, bytes);
246 	}
247 
248 	return memory;
249 }
250 
deallocate(void * memory)251 void deallocate(void *memory)
252 {
253 #if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
254 	free(memory);
255 #else
256 	if(memory)
257 	{
258 		unsigned char *aligned = (unsigned char *)memory;
259 		Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));
260 
261 		delete[] allocation->block;
262 	}
263 #endif
264 }
265 
266 // Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
roundUp(uintptr_t x,uintptr_t m)267 inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
268 {
269 	ASSERT(m > 0 && (m & (m - 1)) == 0);  // |m| must be a power of 2.
270 	return (x + m - 1) & ~(m - 1);
271 }
272 
// Reserves whole memory pages covering at least 'bytes' bytes, with the
// requested PERMISSION_* protection. 'need_exec' is only consulted on
// Fuchsia, where the VMO must be explicitly marked executable; other
// platforms derive executability from 'permissions' alone.
// Returns nullptr on failure. Release with deallocateMemoryPages().
void *allocateMemoryPages(size_t bytes, int permissions, bool need_exec)
{
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	void *mapping = nullptr;

#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
	int flags = MAP_PRIVATE;

	// Try to name the memory region for the executable code,
	// to aid profilers.
	int anonFd = anonymousFd();
	if(anonFd == -1)
	{
		flags |= MAP_ANONYMOUS;
	}
	else
	{
		// Back the mapping with the named memfd; grow it if needed.
		ensureAnonFileSize(anonFd, length);
	}

	mapping = mmap(
	    nullptr, length, permissionsToMmapProt(permissions), flags, anonFd, 0);

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#	if defined(__ANDROID__) && !defined(ANDROID_HOST_BUILD) && !defined(ANDROID_NDK_BUILD)
	else
	{
		// On Android, prefer to use a non-standard prctl called
		// PR_SET_VMA_ANON_NAME to set the name of a private anonymous
		// mapping, as Android restricts EXECUTE permission on
		// CoW/shared anonymous mappings with sepolicy neverallows.
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, mapping, length,
		      MACRO_STRINGIFY(REACTOR_ANONYMOUS_MMAP_NAME));
	}
#	endif  // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD
#elif defined(__Fuchsia__)
	zx_handle_t vmo;
	if(zx_vmo_create(length, 0, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	// NOTE(review): no zx_handle_close(vmo) on this failure path — verify
	// whether zx_vmo_replace_as_executable consumes the handle on failure;
	// otherwise the VMO handle leaks here.
	if(need_exec &&
	   zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	zx_vaddr_t reservation;
	zx_status_t status = zx_vmar_map(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions), 0, vmo,
	    0, length, &reservation);
	zx_handle_close(vmo);
	if(status != ZX_OK)
	{
		return nullptr;
	}

	// zx_vmar_map() returns page-aligned address.
	ASSERT(roundUp(reservation, pageSize) == reservation);

	mapping = reinterpret_cast<void *>(reservation);
#elif defined(__APPLE__)
	int prot = permissionsToMmapProt(permissions);
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
	// On macOS 10.14 and higher, executables that are code signed with the
	// "runtime" option cannot execute writable memory by default. They can opt
	// into this capability by specifying the "com.apple.security.cs.allow-jit"
	// code signing entitlement and allocating the region with the MAP_JIT flag.
	mapping = mmap(nullptr, length, prot, flags | MAP_JIT, -1, 0);

	if(mapping == MAP_FAILED)
	{
		// Retry without MAP_JIT (for older macOS versions).
		mapping = mmap(nullptr, length, prot, flags, -1, 0);
	}

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#else
	// Generic fallback: heap-allocate page-aligned memory, then apply the
	// requested protection to those pages.
	mapping = allocate(length, pageSize);
	protectMemoryPages(mapping, length, permissions);
#endif

	return mapping;
}
363 
protectMemoryPages(void * memory,size_t bytes,int permissions)364 void protectMemoryPages(void *memory, size_t bytes, int permissions)
365 {
366 	if(bytes == 0)
367 	{
368 		return;
369 	}
370 
371 	bytes = roundUp(bytes, memoryPageSize());
372 
373 #if defined(_WIN32)
374 	unsigned long oldProtection;
375 	BOOL result =
376 	    VirtualProtect(memory, bytes, permissionsToProtectMode(permissions),
377 	                   &oldProtection);
378 	ASSERT(result);
379 #elif defined(__Fuchsia__)
380 	zx_status_t status = zx_vmar_protect(
381 	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions),
382 	    reinterpret_cast<zx_vaddr_t>(memory), bytes);
383 	ASSERT(status == ZX_OK);
384 #else
385 	int result =
386 	    mprotect(memory, bytes, permissionsToMmapProt(permissions));
387 	ASSERT(result == 0);
388 #endif
389 }
390 
deallocateMemoryPages(void * memory,size_t bytes)391 void deallocateMemoryPages(void *memory, size_t bytes)
392 {
393 #if defined(_WIN32)
394 	unsigned long oldProtection;
395 	BOOL result =
396 	    VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
397 	ASSERT(result);
398 	deallocate(memory);
399 #elif defined(__APPLE__) || (defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME))
400 	size_t pageSize = memoryPageSize();
401 	size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
402 	int result = munmap(memory, length);
403 	ASSERT(result == 0);
404 #elif defined(__Fuchsia__)
405 	size_t pageSize = memoryPageSize();
406 	size_t length = roundUp(bytes, pageSize);
407 	zx_status_t status = zx_vmar_unmap(
408 	    zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
409 	ASSERT(status == ZX_OK);
410 #else
411 	int result = mprotect(memory, bytes, PROT_READ | PROT_WRITE);
412 	ASSERT(result == 0);
413 	deallocate(memory);
414 #endif
415 }
416 
417 }  // namespace rr
418