/*
 * This file is from CPython's Modules/_ctypes/malloc_closure.c
 * and has received some edits.
 */

#include <ffi.h>
#ifdef MS_WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#include <unistd.h>
# if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
#endif

/* On PaX-enabled kernels that have MPROTECT enabled, we can't use PROT_EXEC.

   This is, apparently, an undocumented change to ffi_prep_closure():
   depending on the Linux kernel we're running on, we must give it a
   mmap that is either PROT_READ|PROT_WRITE|PROT_EXEC or only
   PROT_READ|PROT_WRITE.  In the latter case, just trying to obtain a
   mmap with PROT_READ|PROT_WRITE|PROT_EXEC would kill our process(!),
   but in that situation libffi is fine with only PROT_READ|PROT_WRITE.
   There is nothing in the libffi API to know that, though, so we have
   to guess by parsing /proc/self/status.  "Meh."
 */
#ifdef __linux__
#include <stdio.h>      /* fopen(), getline(), sscanf(), fclose() */
#include <stdlib.h>
#include <string.h>     /* strncmp() */

static int emutramp_enabled = -1;

static int
emutramp_enabled_check (void)
{
    char *buf = NULL;
    size_t len = 0;
    FILE *f;
    int ret;
    f = fopen ("/proc/self/status", "r");
    if (f == NULL)
        return 0;
    ret = 0;

    while (getline (&buf, &len, f) != -1)
        if (!strncmp (buf, "PaX:", 4))
            {
                char emutramp;
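                /* a matching line typically looks like "PaX:   PeMRs";
                   the second flag character is read below and is 'E'
                   when trampoline emulation (EMUTRAMP) is enabled,
                   lowercase 'e' when it is not (the exact example
                   string is an assumption about the PaX format) */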
                if (sscanf (buf, "%*s %*c%c", &emutramp) == 1)
                    ret = (emutramp == 'E');
                break;
            }
    free (buf);
    fclose (f);
    return ret;
}

#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
        : (emutramp_enabled = emutramp_enabled_check ()))
#else
#define is_emutramp_enabled() 0
#endif


/* 'allocate_num_pages' is dynamically adjusted starting from one
   page.  It grows by a factor of PAGE_ALLOCATION_GROWTH_RATE.  This is
   meant to handle both the common case of not needing a lot of pages,
   and the rare case of needing many of them.  Systems in general have a
   limit on how many mmap'd blocks can be open.
*/

#define PAGE_ALLOCATION_GROWTH_RATE  1.3
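
/* For example, starting from zero the successive values of
   'allocate_num_pages' are 1, 2, 3, 4, 6, 8, 11, 15, 20, ...: each call
   to more_core() below computes
   1 + (Py_ssize_t)(allocate_num_pages * PAGE_ALLOCATION_GROWTH_RATE). */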

static Py_ssize_t allocate_num_pages = 0;

/* #define MALLOC_CLOSURE_DEBUG */ /* enable for some debugging output */

/******************************************************************/

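/* Allocated memory is carved into 'mmaped_block' units.  A unit either
   holds a live ffi_closure or, while on the free list, reuses its own
   storage as the 'next' link (an intrusive singly-linked list), so no
   separate bookkeeping memory is needed. */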
union mmaped_block {
    ffi_closure closure;
    union mmaped_block *next;
};

static union mmaped_block *free_list = 0;
static Py_ssize_t _pagesize = 0;

static void more_core(void)
{
    union mmaped_block *item;
    Py_ssize_t count, i;

/* determine the pagesize */
#ifdef MS_WIN32
    if (!_pagesize) {
        SYSTEM_INFO systeminfo;
        GetSystemInfo(&systeminfo);
        _pagesize = systeminfo.dwPageSize;
    }
#else
    if (!_pagesize) {
#ifdef _SC_PAGESIZE
        _pagesize = sysconf(_SC_PAGESIZE);
#else
        _pagesize = getpagesize();
#endif
    }
#endif
    if (_pagesize <= 0)
        _pagesize = 4096;

    /* bump 'allocate_num_pages' */
    allocate_num_pages = 1 + (
        (Py_ssize_t)(allocate_num_pages * PAGE_ALLOCATION_GROWTH_RATE));

    /* calculate the number of mmaped_blocks to allocate */
    count = (allocate_num_pages * _pagesize) / sizeof(union mmaped_block);

    /* allocate a memory block */
#ifdef MS_WIN32
    item = (union mmaped_block *)VirtualAlloc(NULL,
                                           count * sizeof(union mmaped_block),
                                           MEM_COMMIT,
                                           PAGE_EXECUTE_READWRITE);
    if (item == NULL)
        return;
#else
    {
    int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    if (is_emutramp_enabled ())
        prot &= ~PROT_EXEC;
    item = (union mmaped_block *)mmap(NULL,
                        allocate_num_pages * _pagesize,
                        prot,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1,
                        0);
    if (item == (void *)MAP_FAILED)
        return;
    }
#endif

#ifdef MALLOC_CLOSURE_DEBUG
    printf("block at %p allocated (%ld bytes), %ld mmaped_blocks\n",
           item, (long)(allocate_num_pages * _pagesize), (long)count);
#endif
    /* put them into the free list */
    for (i = 0; i < count; ++i) {
        item->next = free_list;
        free_list = item;
        ++item;
    }
}

/******************************************************************/

/* put the item back into the free list */
static void cffi_closure_free(ffi_closure *p)
{
    union mmaped_block *item = (union mmaped_block *)p;
    item->next = free_list;
    free_list = item;
}
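
/* note: freed blocks are only recycled through the free list above;
   the underlying pages are never munmap()'ed or VirtualFree()'d back
   to the operating system for the lifetime of the process */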

/* return one item from the free list, allocating more if needed */
static ffi_closure *cffi_closure_alloc(void)
{
    union mmaped_block *item;
    if (!free_list)
        more_core();
    if (!free_list)
        return NULL;
    item = free_list;
    free_list = item->next;
    return &item->closure;
}
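
/* Usage sketch (added for illustration; not part of the original file):
   the code that #includes this file is expected to pair the two helpers
   roughly as follows, where 'cif', 'invoke_callback' and 'userdata'
   stand for the caller's own libffi setup:

       ffi_closure *closure = cffi_closure_alloc();
       if (closure == NULL)
           return NULL;
       if (ffi_prep_closure(closure, &cif, invoke_callback, userdata)
               != FFI_OK) {
           cffi_closure_free(closure);
           return NULL;
       }
       return closure;

   With the old-style ffi_prep_closure() API mentioned at the top of
   this file, the prepared 'closure' itself serves as the executable
   entry point; when the callback is no longer needed it is released
   with cffi_closure_free(closure). */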