/* -----------------------------------------------------------------------
   closures.c - Copyright (c) 2019 Anthony Green
                Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
                Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
                Copyright (c) 2011 Plausible Labs Cooperative, Inc.

   Code to allocate and deallocate memory for closures.

   Permission is hereby granted, free of charge, to any person obtaining
   a copy of this software and associated documentation files (the
   ``Software''), to deal in the Software without restriction, including
   without limitation the rights to use, copy, modify, merge, publish,
   distribute, sublicense, and/or sell copies of the Software, and to
   permit persons to whom the Software is furnished to do so, subject to
   the following conditions:

   The above copyright notice and this permission notice shall be included
   in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
   HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
   WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.
   ----------------------------------------------------------------------- */
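
/* Usage sketch (illustrative only, not part of this file): client code
   obtains closure memory through the ffi_closure_alloc()/ffi_closure_free()
   pair defined below, then makes it live with ffi_prep_closure_loc().
   Here `cif' and `my_handler' are assumed to have been prepared by the
   caller:

     void *code;
     ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
     if (closure != NULL)
       {
         if (ffi_prep_closure_loc (closure, &cif, my_handler,
                                   NULL, code) == FFI_OK)
           {
             ... invoke the closure through CODE, the executable
                 alias of CLOSURE ...
           }
         ffi_closure_free (closure);
       }
*/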

#if defined __linux__ && !defined _GNU_SOURCE
#define _GNU_SOURCE 1
#endif

#include <fficonfig.h>
#include <ffi.h>
#include <ffi_common.h>

#ifdef __NetBSD__
#include <sys/param.h>
#endif

#if __NetBSD_Version__ - 0 >= 799007200
/* NetBSD with PROT_MPROTECT */
#include <sys/mman.h>

#include <stddef.h>
#include <unistd.h>

static const size_t overhead =
  (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ?
    sizeof(max_align_t)
    : sizeof(void *) + sizeof(size_t);

#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d)))

void *
ffi_closure_alloc (size_t size, void **code)
{
  static size_t page_size;
  size_t rounded_size;
  void *codeseg, *dataseg;
  int prot;

  /* Expect that PAX mprotect is active and a separate code mapping is
     necessary. */
  if (!code)
    return NULL;

  /* Obtain system page size. */
  if (!page_size)
    page_size = sysconf(_SC_PAGESIZE);

  /* Round allocation size up to the next page, keeping in mind the size
     field and pointer to code map. */
  rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1);

  /* Primary mapping is RW, but request permission to switch to PROT_EXEC
     later. */
  prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC);
  dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0);
  if (dataseg == MAP_FAILED)
    return NULL;

  /* Create secondary mapping and switch it to RX. */
  codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP);
  if (codeseg == MAP_FAILED) {
    munmap(dataseg, rounded_size);
    return NULL;
  }
  if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) {
    munmap(codeseg, rounded_size);
    munmap(dataseg, rounded_size);
    return NULL;
  }

  /* Remember allocation size and location of the secondary mapping for
     ffi_closure_free. */
  memcpy(dataseg, &rounded_size, sizeof(rounded_size));
  memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *));
  *code = ADD_TO_POINTER(codeseg, overhead);
  return ADD_TO_POINTER(dataseg, overhead);
}
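
/* Layout of the two mappings created above (a sketch, not a binary
   contract):

     dataseg (RW):  [rounded_size][codeseg ptr][pad to `overhead'][closure...]
     codeseg (RX):   the same physical pages, as an executable alias

   The pointer returned to the caller and the *code pointer both sit
   `overhead' bytes past the start of their respective mapping, so writes
   through the data view become fetchable through the code view. */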

void
ffi_closure_free (void *ptr)
{
  void *codeseg, *dataseg;
  size_t rounded_size;

  dataseg = ADD_TO_POINTER(ptr, -overhead);
  memcpy(&rounded_size, dataseg, sizeof(rounded_size));
  memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *));
  munmap(dataseg, rounded_size);
  munmap(codeseg, rounded_size);
}
#else /* !NetBSD with PROT_MPROTECT */

#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
# if __linux__ && !defined(__ANDROID__)
/* This macro indicates it may be forbidden to map anonymous memory
   with both write and execute permission.  Code compiled when this
   option is defined will attempt to map such pages once, but if it
   fails, it falls back to creating a temporary file in a writable and
   executable filesystem and mapping pages from it into separate
   locations in the virtual memory space, one location writable and
   another executable.  */
#  define FFI_MMAP_EXEC_WRIT 1
#  define HAVE_MNTENT 1
# endif
# if defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)
/* Windows systems may have Data Execution Protection (DEP) enabled,
   which requires the use of VirtualAlloc/VirtualFree to allocate and
   free executable memory. */
#  define FFI_MMAP_EXEC_WRIT 1
# endif
#endif
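
/* Sketch of that fallback as implemented further below (simplified and
   hypothetical; the real code routes through dlmalloc and keeps its
   bookkeeping inside the mapped chunk):

     int fd = open_temp_exec_file ();      create and unlink a temp file
     allocate_space (fd, 0, len);          the file must really hold LEN bytes
     void *w = mmap (NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
     void *x = mmap (NULL, len, PROT_READ|PROT_EXEC,  MAP_SHARED, fd, 0);

   Both views share the same file pages, so a trampoline written through
   W becomes executable at the corresponding address in X without any
   page ever being writable and executable at once. */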

#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
# if defined(__linux__) && !defined(__ANDROID__)
/* When defined to 1, check for SELinux; if SELinux is active, don't
   attempt a PROT_EXEC|PROT_WRITE mapping at all, as that might cause
   audit messages.  */
#  define FFI_MMAP_EXEC_SELINUX 1
# endif
#endif

#if FFI_CLOSURES

#if FFI_EXEC_TRAMPOLINE_TABLE

#ifdef __MACH__

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

extern void *ffi_closure_trampoline_table_page;

typedef struct ffi_trampoline_table ffi_trampoline_table;
typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;

struct ffi_trampoline_table
{
  /* contiguous writable and executable pages */
  vm_address_t config_page;
  vm_address_t trampoline_page;

  /* free list tracking */
  uint16_t free_count;
  ffi_trampoline_table_entry *free_list;
  ffi_trampoline_table_entry *free_list_pool;

  ffi_trampoline_table *prev;
  ffi_trampoline_table *next;
};

struct ffi_trampoline_table_entry
{
  void *(*trampoline) (void);
  ffi_trampoline_table_entry *next;
};

/* Total number of trampolines that fit in one trampoline table */
#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE)
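
/* For example, with a 16 KiB PAGE_MAX_SIZE and a 32-byte
   FFI_TRAMPOLINE_SIZE (both values are per-platform; the numbers here
   are only assumptions for illustration), one table would hold
   16384 / 32 = 512 trampolines. */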

static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
static ffi_trampoline_table *ffi_trampoline_tables = NULL;

static ffi_trampoline_table *
ffi_trampoline_table_alloc (void)
{
  ffi_trampoline_table *table;
  vm_address_t config_page;
  vm_address_t trampoline_page;
  vm_address_t trampoline_page_template;
  vm_prot_t cur_prot;
  vm_prot_t max_prot;
  kern_return_t kt;
  uint16_t i;

  /* Allocate two pages -- a config page and a placeholder page */
  config_page = 0x0;
  kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
		    VM_FLAGS_ANYWHERE);
  if (kt != KERN_SUCCESS)
    return NULL;

  /* Remap the trampoline table on top of the placeholder page */
  trampoline_page = config_page + PAGE_MAX_SIZE;
  trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
#ifdef __arm__
  /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
  trampoline_page_template &= ~1UL;
#endif
  kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0,
		 VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template,
		 FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE);
  if (kt != KERN_SUCCESS)
    {
      vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2);
      return NULL;
    }

  /* We have valid trampoline and config pages */
  table = calloc (1, sizeof (ffi_trampoline_table));
  if (table == NULL)
    {
      vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2);
      return NULL;
    }
  table->free_count = FFI_TRAMPOLINE_COUNT;
  table->config_page = config_page;
  table->trampoline_page = trampoline_page;

  /* Create and initialize the free list */
  table->free_list_pool =
    calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));
  if (table->free_list_pool == NULL)
    {
      vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2);
      free (table);
      return NULL;
    }

  for (i = 0; i < table->free_count; i++)
    {
      ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
      entry->trampoline =
	(void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));

      if (i < table->free_count - 1)
	entry->next = &table->free_list_pool[i + 1];
    }

  table->free_list = table->free_list_pool;

  return table;
}
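
/* How the page pair cooperates (assumed from the per-architecture
   trampoline implementations; it is not defined in this file): the
   trampoline at trampoline_page + i*FFI_TRAMPOLINE_SIZE locates its
   context at the same offset in the config page one page below it, and
   that context is filled in by ffi_prep_closure_loc.  This file only
   manages the page pair and the free list. */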

static void
ffi_trampoline_table_free (ffi_trampoline_table *table)
{
  /* Remove from the list */
  if (table->prev != NULL)
    table->prev->next = table->next;

  if (table->next != NULL)
    table->next->prev = table->prev;

  /* Deallocate pages */
  vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2);

  /* Deallocate free list */
  free (table->free_list_pool);
  free (table);
}

void *
ffi_closure_alloc (size_t size, void **code)
{
  /* Create the closure */
  ffi_closure *closure = malloc (size);
  if (closure == NULL)
    return NULL;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Check for an active trampoline table with available entries. */
  ffi_trampoline_table *table = ffi_trampoline_tables;
  if (table == NULL || table->free_list == NULL)
    {
      table = ffi_trampoline_table_alloc ();
      if (table == NULL)
	{
	  pthread_mutex_unlock (&ffi_trampoline_lock);
	  free (closure);
	  return NULL;
	}

      /* Insert the new table at the top of the list */
      table->next = ffi_trampoline_tables;
      if (table->next != NULL)
	table->next->prev = table;

      ffi_trampoline_tables = table;
    }

  /* Claim the free entry */
  ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
  ffi_trampoline_tables->free_list = entry->next;
  ffi_trampoline_tables->free_count--;
  entry->next = NULL;

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Initialize the return values */
  *code = entry->trampoline;
  closure->trampoline_table = table;
  closure->trampoline_table_entry = entry;

  return closure;
}

void
ffi_closure_free (void *ptr)
{
  ffi_closure *closure = ptr;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Fetch the table and entry references */
  ffi_trampoline_table *table = closure->trampoline_table;
  ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;

  /* Return the entry to the free list */
  entry->next = table->free_list;
  table->free_list = entry;
  table->free_count++;

  /* If all trampolines within this table are free, and at least one
     other table exists, deallocate the table */
  if (table->free_count == FFI_TRAMPOLINE_COUNT
      && ffi_trampoline_tables != table)
    {
      ffi_trampoline_table_free (table);
    }
  else if (ffi_trampoline_tables != table)
    {
      /* Otherwise, bump this table to the top of the list */
      table->prev = NULL;
      table->next = ffi_trampoline_tables;
      if (ffi_trampoline_tables != NULL)
	ffi_trampoline_tables->prev = table;

      ffi_trampoline_tables = table;
    }

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Free the closure */
  free (closure);
}

#endif

/* Per-target implementation; it's unclear what can reasonably be shared
   between two OS/architecture implementations.  */

#elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */

#define USE_LOCKS 1
#define USE_DL_PREFIX 1
#ifdef __GNUC__
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 1
#endif
#endif

/* We need to use mmap, not sbrk.  */
#define HAVE_MORECORE 0

/* We could, in theory, support mremap, but it wouldn't buy us anything.  */
#define HAVE_MREMAP 0

/* We have no use for this, so save some code and data.  */
#define NO_MALLINFO 1

/* We need all allocations to be in regular segments, otherwise we
   lose track of the corresponding code address.  */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T

/* Don't allocate more than a page unless needed.  */
#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#ifndef _MSC_VER
#include <unistd.h>
#endif
#include <string.h>
#include <stdio.h>
#if !defined(X86_WIN32) && !defined(X86_WIN64) && !defined(_M_ARM64)
#ifdef HAVE_MNTENT
#include <mntent.h>
#endif /* HAVE_MNTENT */
#include <sys/param.h>
#include <pthread.h>

/* We don't want sys/mman.h to be included after we redefine mmap and
   munmap.  */
#include <sys/mman.h>
#define LACKS_SYS_MMAN_H 1

#if FFI_MMAP_EXEC_SELINUX
#include <sys/statfs.h>
#include <stdlib.h>

static int selinux_enabled = -1;

static int
selinux_enabled_check (void)
{
  struct statfs sfs;
  FILE *f;
  char *buf = NULL;
  size_t len = 0;

  if (statfs ("/selinux", &sfs) >= 0
      && (unsigned int) sfs.f_type == 0xf97cff8cU)
    return 1;
  f = fopen ("/proc/mounts", "r");
  if (f == NULL)
    return 0;
  while (getline (&buf, &len, f) >= 0)
    {
      char *p = strchr (buf, ' ');
      if (p == NULL)
        break;
      p = strchr (p + 1, ' ');
      if (p == NULL)
        break;
      if (strncmp (p + 1, "selinuxfs ", 10) == 0)
        {
          free (buf);
          fclose (f);
          return 1;
        }
    }
  free (buf);
  fclose (f);
  return 0;
}
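
/* Example /proc/mounts line the loop above matches (the third,
   filesystem-type, field is the one compared against "selinuxfs "):

     selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
*/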

#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \
			      : (selinux_enabled = selinux_enabled_check ()))

#else

#define is_selinux_enabled() 0

#endif /* !FFI_MMAP_EXEC_SELINUX */

/* On PaX-enabled kernels that have MPROTECT enabled we can't use
   PROT_EXEC.  */
#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
#include <stdlib.h>

static int emutramp_enabled = -1;

static int
emutramp_enabled_check (void)
{
  char *buf = NULL;
  size_t len = 0;
  FILE *f;
  int ret;
  f = fopen ("/proc/self/status", "r");
  if (f == NULL)
    return 0;
  ret = 0;

  while (getline (&buf, &len, f) != -1)
    if (!strncmp (buf, "PaX:", 4))
      {
        char emutramp;
        if (sscanf (buf, "%*s %*c%c", &emutramp) == 1)
          ret = (emutramp == 'E');
        break;
      }
  free (buf);
  fclose (f);
  return ret;
}

#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
                               : (emutramp_enabled = emutramp_enabled_check ()))
#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
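
/* Example /proc/self/status line parsed above (format assumed from the
   PaX documentation):

     PaX:    PeMRs

   The sscanf skips the "PaX:" token and the first flag character, then
   reads the second; an upper-case 'E' there means emulated trampolines
   are enabled. */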

#elif defined (__CYGWIN__) || defined(__INTERIX)

#include <sys/mman.h>

/* Cygwin is Linux-like, but not quite that Linux-like.  */
#define is_selinux_enabled() 0

#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */

#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
#define is_emutramp_enabled() 0
#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */

/* Declare all functions defined in dlmalloc.c as static.  */
static void *dlmalloc(size_t);
static void dlfree(void*);
static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
static void *dlvalloc(size_t) MAYBE_UNUSED;
static int dlmallopt(int, int) MAYBE_UNUSED;
static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
static void *dlpvalloc(size_t) MAYBE_UNUSED;
static int dlmalloc_trim(size_t) MAYBE_UNUSED;
static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
static void dlmalloc_stats(void) MAYBE_UNUSED;

#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
/* Use these for mmap and munmap within dlmalloc.c.  */
static void *dlmmap(void *, size_t, int, int, int, off_t);
static int dlmunmap(void *, size_t);
#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */

#define mmap dlmmap
#define munmap dlmunmap

#include "dlmalloc.c"

#undef mmap
#undef munmap

#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)

/* A mutex used to synchronize access to *exec* variables in this file.  */
static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;

/* A file descriptor of a temporary file from which we'll map
   executable pages.  */
static int execfd = -1;

/* The amount of space already allocated from the temporary file.  */
static size_t execsize = 0;

/* Open a temporary file name, and immediately unlink it.  */
static int
open_temp_exec_file_name (char *name, int flags)
{
  int fd;

#ifdef HAVE_MKOSTEMP
  fd = mkostemp (name, flags);
#else
  fd = mkstemp (name);
#endif

  if (fd != -1)
    unlink (name);

  return fd;
}

/* Open a temporary file in the named directory.  */
static int
open_temp_exec_file_dir (const char *dir)
{
  static const char suffix[] = "/ffiXXXXXX";
  int lendir, flags;
  char *tempname;
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag, retry
     without it.  */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
    return fd;
  } else {
    errno = 0;
  }
#endif

  lendir = (int) strlen (dir);
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  if (!tempname)
    return -1;

  memcpy (tempname, dir, lendir);
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}

/* Open a temporary file in the directory in the named environment
   variable.  */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *value = getenv (envvar);

  if (!value)
    return -1;

  return open_temp_exec_file_dir (value);
}

#ifdef HAVE_MNTENT
/* Open a temporary file in an executable and writable mount point
   listed in the mounts file.  Subsequent calls with the same mounts
   keep searching for mount points in the same file.  Providing NULL
   as the mounts file closes the file.  */
static int
open_temp_exec_file_mnt (const char *mounts)
{
  static const char *last_mounts;
  static FILE *last_mntent;

  if (mounts != last_mounts)
    {
      if (last_mntent)
	endmntent (last_mntent);

      last_mounts = mounts;

      if (mounts)
	last_mntent = setmntent (mounts, "r");
      else
	last_mntent = NULL;
    }

  if (!last_mntent)
    return -1;

  for (;;)
    {
      int fd;
      struct mntent mnt;
      char buf[MAXPATHLEN * 3];

      if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL)
	return -1;

      if (hasmntopt (&mnt, "ro")
	  || hasmntopt (&mnt, "noexec")
	  || access (mnt.mnt_dir, W_OK))
	continue;

      fd = open_temp_exec_file_dir (mnt.mnt_dir);

      if (fd != -1)
	return fd;
    }
}
#endif /* HAVE_MNTENT */

/* Instructions to look for a location to hold a temporary file that
   can be mapped in for execution.  */
static struct
{
  int (*func)(const char *);
  const char *arg;
  int repeat;
} open_temp_exec_file_opts[] = {
  { open_temp_exec_file_env, "TMPDIR", 0 },
  { open_temp_exec_file_dir, "/tmp", 0 },
  { open_temp_exec_file_dir, "/var/tmp", 0 },
  { open_temp_exec_file_dir, "/dev/shm", 0 },
  { open_temp_exec_file_env, "HOME", 0 },
#ifdef HAVE_MNTENT
  { open_temp_exec_file_mnt, "/etc/mtab", 1 },
  { open_temp_exec_file_mnt, "/proc/mounts", 1 },
#endif /* HAVE_MNTENT */
};
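
/* Practical consequence of the search order above: on a system where
   /tmp is mounted noexec, pointing TMPDIR at a writable, executable
   filesystem (e.g. `TMPDIR=/opt/scratch', a hypothetical path) before
   the first closure allocation steers the temporary exec file there. */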

/* Current index into open_temp_exec_file_opts.  */
static int open_temp_exec_file_opts_idx = 0;

/* Reset the current multi-call function, then advance to the next
   entry.  If we're at the last entry, wrap around to the first and
   return nonzero; otherwise return zero.  */
static int
open_temp_exec_file_opts_next (void)
{
  if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL);

  open_temp_exec_file_opts_idx++;
  if (open_temp_exec_file_opts_idx
      == (sizeof (open_temp_exec_file_opts)
	  / sizeof (*open_temp_exec_file_opts)))
    {
      open_temp_exec_file_opts_idx = 0;
      return 1;
    }

  return 0;
}

/* Return a file descriptor of a temporary zero-sized file in a
   writable and executable filesystem.  */
static int
open_temp_exec_file (void)
{
  int fd;

  do
    {
      fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
	(open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);

      if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
	  || fd == -1)
	{
	  if (open_temp_exec_file_opts_next ())
	    break;
	}
    }
  while (fd == -1);

  return fd;
}

/* We need to allocate space in a file that will be backing a writable
   mapping.  Several problems exist with the usual approaches:
   - fallocate() is Linux-only
   - posix_fallocate() is not available on all platforms
   - ftruncate() does not allocate space on filesystems with sparse files
   Failure to allocate the space will cause SIGBUS to be delivered when
   the mapping is subsequently written to.  */
static int
allocate_space (int fd, off_t offset, off_t len)
{
  static size_t page_size;

  /* Obtain system page size. */
  if (!page_size)
    page_size = sysconf(_SC_PAGESIZE);

  unsigned char buf[page_size];
  memset (buf, 0, page_size);

  while (len > 0)
    {
      off_t to_write = (len < page_size) ? len : page_size;
      if (write (fd, buf, to_write) < to_write)
        return -1;
      len -= to_write;
    }

  return 0;
}

/* Map in a chunk of memory from the temporary exec file into separate
   locations in the virtual memory address space, one writable and one
   executable.  Returns the address of the writable portion, after
   storing an offset to the corresponding executable portion at the
   last word of the requested chunk.  */
static void *
dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
{
  void *ptr;

  if (execfd == -1)
    {
      open_temp_exec_file_opts_idx = 0;
    retry_open:
      execfd = open_temp_exec_file ();
      if (execfd == -1)
	return MFAIL;
    }

  offset = execsize;

  if (allocate_space (execfd, offset, length))
    return MFAIL;

  flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
  flags |= MAP_SHARED;

  ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
	      flags, execfd, offset);
  if (ptr == MFAIL)
    {
      if (!offset)
	{
	  close (execfd);
	  goto retry_open;
	}
      if (ftruncate (execfd, offset) != 0)
	{
	  /* FIXME: error reporting could be added here.  Propagating the
	     ftruncate() failure would gain nothing, since we are already
	     on an error path.  */
	}

      return MFAIL;
    }
  else if (!offset
	   && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    open_temp_exec_file_opts_next ();

  start = mmap (start, length, prot, flags, execfd, offset);

  if (start == MFAIL)
    {
      munmap (ptr, length);
      if (ftruncate (execfd, offset) != 0)
	{
	  /* FIXME: error reporting could be added here.  Propagating the
	     ftruncate() failure would gain nothing, since we are already
	     on an error path.  */
	}
      return start;
    }

  mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;

  execsize += length;

  return start;
}
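
/* Note on mmap_exec_offset(): it is a macro supplied by the modified
   dlmalloc.c included above, and it stores the (executable - writable)
   address delta in the last word of the chunk, as described in the
   comment before dlmmap_locked.  add_segment_exec_offset() and
   sub_segment_exec_offset(), used further below, apply that delta in
   each direction. */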

/* Map in a writable and executable chunk of memory if possible.
   Failing that, fall back to dlmmap_locked.  */
static void *
dlmmap (void *start, size_t length, int prot,
	int flags, int fd, off_t offset)
{
  void *ptr;

  assert (start == NULL && length % malloc_getpagesize == 0
	  && prot == (PROT_READ | PROT_WRITE)
	  && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
	  && fd == -1 && offset == 0);

  if (execfd == -1 && is_emutramp_enabled ())
    {
      ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
      return ptr;
    }

  if (execfd == -1 && !is_selinux_enabled ())
    {
      ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);

      if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
	/* Cool, no need to mess with separate segments.  */
	return ptr;

      /* If MREMAP_DUP is ever introduced and implemented, try mmap
	 with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
	 MREMAP_DUP and prot at this point.  */
    }

  if (execsize == 0 || execfd == -1)
    {
      pthread_mutex_lock (&open_temp_exec_file_mutex);
      ptr = dlmmap_locked (start, length, prot, flags, offset);
      pthread_mutex_unlock (&open_temp_exec_file_mutex);

      return ptr;
    }

  return dlmmap_locked (start, length, prot, flags, offset);
}

/* Release memory at the given address, as well as the corresponding
   executable page if it's separate.  */
static int
dlmunmap (void *start, size_t length)
{
  /* We don't bother decreasing execsize or truncating the file, since
     we can't quite tell whether we're unmapping the end of the file.
     We don't expect frequent deallocation anyway.  If we did, we
     could locate pages in the file by writing to the pages being
     deallocated and checking that the file contents change.
     Yuck.  */
  msegmentptr seg = segment_holding (gm, start);
  void *code;

  if (seg && (code = add_segment_exec_offset (start, seg)) != start)
    {
      int ret = munmap (code, length);
      if (ret)
	return ret;
    }

  return munmap (start, length);
}

#if FFI_CLOSURE_FREE_CODE
/* Return segment holding given code address.  */
static msegmentptr
segment_holding_code (mstate m, char* addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= add_segment_exec_offset (sp->base, sp)
	&& addr < add_segment_exec_offset (sp->base, sp) + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
#endif

#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */

/* Allocate a chunk of memory with the given size.  Returns a pointer
   to the writable address, and sets *CODE to the corresponding
   executable virtual address.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  if (!code)
    return NULL;

  ptr = dlmalloc (size);

  if (ptr)
    {
      msegmentptr seg = segment_holding (gm, ptr);

      *code = add_segment_exec_offset (ptr, seg);
    }

  return ptr;
}

void *
ffi_data_to_code_pointer (void *data)
{
  msegmentptr seg = segment_holding (gm, data);
  /* We expect closures to be allocated with ffi_closure_alloc(), in
     which case seg will be non-NULL.  However, some users take on the
     burden of managing this memory themselves, in which case we'll
     just return data. */
  if (seg)
    return add_segment_exec_offset (data, seg);
  else
    return data;
}

/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be either
   the writable or the executable address.  Otherwise, only the
   writable address can be provided here.  */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  msegmentptr seg = segment_holding_code (gm, ptr);

  if (seg)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif

  dlfree (ptr);
}

# else /* ! FFI_MMAP_EXEC_WRIT */

/* On many systems, memory returned by malloc is writable and
   executable, so just use it.  */

#include <stdlib.h>

void *
ffi_closure_alloc (size_t size, void **code)
{
  if (!code)
    return NULL;

  return *code = malloc (size);
}

void
ffi_closure_free (void *ptr)
{
  free (ptr);
}

void *
ffi_data_to_code_pointer (void *data)
{
  return data;
}

# endif /* ! FFI_MMAP_EXEC_WRIT */
#endif /* FFI_CLOSURES */

#endif /* NetBSD with PROT_MPROTECT */