/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
   This file contains a simple executable memory allocator.

   It is assumed that executable code blocks are usually medium (or sometimes
   large) memory blocks, and that the allocator is called relatively
   infrequently (it is less optimized than general-purpose allocators). Using
   it as a generic allocator is therefore not recommended.

   How it works:
     Memory is allocated in contiguous memory areas called chunks by alloc_chunk().
     Chunk format:
     [ block ][ block ] ... [ block ][ block terminator ]

   Every block, including the block terminator, starts with a block_header.
   The block header stores the size of the current block and the size of the
   previous block. These size fields can also hold special values.
     Block size:
       0 - The block is a free_block, whose real size is kept in a separate
           size member.
       1 - The block is a block terminator.
       n - The block is currently in use, and the value is its size.
     Previous block size:
       0 - This is the first block of the memory chunk.
       n - The size of the previous block.

   Using these size values we can walk the block chain forward or backward
   (see the traversal sketch after this comment). The unused blocks are kept
   in a linked list headed by free_blocks. This list is searched for a
   suitable memory area whenever the allocator is called.

   When a block is freed, the new free block is merged with its adjacent free
   blocks if possible. For example, given the layout

     [ free block ][ used block ][ free block ]

   when "used block" is freed, the three blocks are joined together:

     [           one big free block           ]
*/
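
/*
   A minimal traversal sketch of the format above (illustrative only, not
   part of the allocator; it assumes "chunk" points to the first block and
   uses the AS_BLOCK_HEADER macro defined later in this file):

	struct block_header *h = (struct block_header*)chunk;
	while (h->size != 1) {
		// A zero size marks a free_block; its real size is stored in
		// the free_block's own size member.
		sljit_uw size = h->size ? h->size : ((struct free_block*)h)->size;
		h = AS_BLOCK_HEADER(h, size);
	}
*/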

/* --------------------------------------------------------------------- */
/*  System (OS) functions                                                */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE	0x10000

/*
   alloc_chunk / free_chunk :
     * allocate executable system memory chunks
     * the size is always divisible by CHUNK_SIZE
   SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
     * provided as part of sljitUtils
     * only the allocator requires this lock; sljit itself is fully thread
       safe, as it only uses local variables
*/
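
/*
   A pthread-based sketch of what the lock macros are expected to do
   (an illustrative assumption only; the real definitions live in
   sljitUtils and differ per platform):

	#include <pthread.h>

	static pthread_mutex_t allocator_lock = PTHREAD_MUTEX_INITIALIZER;

	#define SLJIT_ALLOCATOR_LOCK()   pthread_mutex_lock(&allocator_lock)
	#define SLJIT_ALLOCATOR_UNLOCK() pthread_mutex_unlock(&allocator_lock)
*/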

#ifdef _WIN32

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
	SLJIT_UNUSED_ARG(size);
	VirtualFree(chunk, 0, MEM_RELEASE);
}

#else

#ifdef __APPLE__
#ifdef MAP_ANON
/* Configures TARGET_OS_OSX when appropriate */
#include <TargetConditionals.h>

#if TARGET_OS_OSX && defined(MAP_JIT)
#include <sys/utsname.h>
#endif /* TARGET_OS_OSX && MAP_JIT */

#ifdef MAP_JIT

/*
   On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a
   version where it's OK to have more than one JIT block.
   On non-macOS systems, returns MAP_JIT if it is defined.
*/
static SLJIT_INLINE int get_map_jit_flag(void)
{
#if TARGET_OS_OSX
	sljit_sw page_size = get_page_alignment() + 1;
	void *ptr;
	static int map_jit_flag = -1;

	/*
	  The following code is thread safe because multiple initializations
	  set map_jit_flag to the same value and the code has no side effects.
	  Changing the kernel version without a system restart is (very) unlikely.
	*/
	if (map_jit_flag == -1) {
		struct utsname name;

		map_jit_flag = 0;
		uname(&name);

		/* Kernel version for 10.14.0 (Mojave) */
		if (atoi(name.release) >= 18) {
			/* Only use MAP_JIT if a hardened runtime is used */

			ptr = mmap(NULL, page_size, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);

			if (ptr == MAP_FAILED) {
				map_jit_flag = MAP_JIT;
			} else {
				munmap(ptr, page_size);
			}
		}
	}

	return map_jit_flag;
#else /* !TARGET_OS_OSX */
	return MAP_JIT;
#endif /* TARGET_OS_OSX */
}

#endif /* MAP_JIT */
#endif /* MAP_ANON */
#endif /* __APPLE__ */

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	void *retval;
	const int prot = PROT_READ | PROT_WRITE | PROT_EXEC;

#ifdef MAP_ANON

	int flags = MAP_PRIVATE | MAP_ANON;

#ifdef MAP_JIT
	flags |= get_map_jit_flag();
#endif

	retval = mmap(NULL, size, prot, flags, -1, 0);
#else /* !MAP_ANON */
	if (SLJIT_UNLIKELY((dev_zero < 0) && open_dev_zero()))
		return NULL;

	retval = mmap(NULL, size, prot, MAP_PRIVATE, dev_zero, 0);
#endif /* MAP_ANON */

	if (retval == MAP_FAILED)
		retval = NULL;
	else {
		/* Confirm that the requested protection flags can really be applied. */
		if (mprotect(retval, size, prot) < 0) {
			munmap(retval, size);
			retval = NULL;
		}
	}

	return retval;
}

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
	munmap(chunk, size);
}

#endif /* _WIN32 */

/* --------------------------------------------------------------------- */
/*  Common functions                                                     */
/* --------------------------------------------------------------------- */

#define CHUNK_MASK	(~(CHUNK_SIZE - 1))

struct block_header {
	sljit_uw size;
	sljit_uw prev_size;
};

struct free_block {
	struct block_header header;
	struct free_block *next;
	struct free_block *prev;
	sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
	((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
	((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base)		((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
#define ALIGN_SIZE(size)	(((size) + sizeof(struct block_header) + 7) & ~7)
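
/*
   Worked example (assuming a 64-bit target, where sizeof(struct block_header)
   is 16): ALIGN_SIZE(48) == (48 + 16 + 7) & ~7 == 64, so the 48-byte minimum
   request enforced by sljit_malloc_exec() becomes a 64-byte block including
   its header, and every block size stays a multiple of 8.
*/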

static struct free_block* free_blocks;
static sljit_uw allocated_size;
static sljit_uw total_size;

static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
	free_block->header.size = 0;
	free_block->size = size;

	free_block->next = free_blocks;
	free_block->prev = NULL;
	if (free_blocks)
		free_blocks->prev = free_block;
	free_blocks = free_block;
}

static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
	if (free_block->next)
		free_block->next->prev = free_block->prev;

	if (free_block->prev)
		free_block->prev->next = free_block->next;
	else {
		SLJIT_ASSERT(free_blocks == free_block);
		free_blocks = free_block->next;
	}
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
	struct block_header *header;
	struct block_header *next_header;
	struct free_block *free_block;
	sljit_uw chunk_size;

	SLJIT_ALLOCATOR_LOCK();
	/* The 64 byte minimum (including the header) leaves enough room for a
	   free_block structure when the block is freed later. */
	if (size < (64 - sizeof(struct block_header)))
		size = (64 - sizeof(struct block_header));
	size = ALIGN_SIZE(size);

	free_block = free_blocks;
	while (free_block) {
		if (free_block->size >= size) {
			chunk_size = free_block->size;
			if (chunk_size > size + 64) {
				/* We just cut a block from the end of the free block. */
				chunk_size -= size;
				free_block->size = chunk_size;
				header = AS_BLOCK_HEADER(free_block, chunk_size);
				header->prev_size = chunk_size;
				AS_BLOCK_HEADER(header, size)->prev_size = size;
			}
			else {
				sljit_remove_free_block(free_block);
				header = (struct block_header*)free_block;
				size = chunk_size;
			}
			allocated_size += size;
			header->size = size;
			SLJIT_ALLOCATOR_UNLOCK();
			return MEM_START(header);
		}
		free_block = free_block->next;
	}

	chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
	header = (struct block_header*)alloc_chunk(chunk_size);
	if (!header) {
		SLJIT_ALLOCATOR_UNLOCK();
		return NULL;
	}

	chunk_size -= sizeof(struct block_header);
	total_size += chunk_size;

	header->prev_size = 0;
	if (chunk_size > size + 64) {
		/* Cut the allocated space into a free and a used block. */
		allocated_size += size;
		header->size = size;
		chunk_size -= size;

		free_block = AS_FREE_BLOCK(header, size);
		free_block->header.prev_size = size;
		sljit_insert_free_block(free_block, chunk_size);
		next_header = AS_BLOCK_HEADER(free_block, chunk_size);
	}
	else {
		/* All space belongs to this allocation. */
		allocated_size += chunk_size;
		header->size = chunk_size;
		next_header = AS_BLOCK_HEADER(header, chunk_size);
	}
	next_header->size = 1;
	next_header->prev_size = chunk_size;
	SLJIT_ALLOCATOR_UNLOCK();
	return MEM_START(header);
}
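
/*
   Worked example of the chunk arithmetic above, assuming a 64-bit target
   (sizeof(struct block_header) == 16) and an empty free list: a request of
   100 bytes is first rounded to ALIGN_SIZE(100) == 120. The chunk size is
   (120 + 16 + 0xFFFF) & CHUNK_MASK == 0x10000 (64 KByte). After reserving
   the terminator's header (chunk_size -= 16 leaves 65520), the space is
   split into a 120 byte used block and a 65400 byte free block, followed by
   the block terminator.
*/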

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
	struct block_header *header;
	struct free_block* free_block;

	SLJIT_ALLOCATOR_LOCK();
	header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
	allocated_size -= header->size;

	/* Merge adjacent free blocks together if possible. */

	/* If header->prev_size == 0, free_block will be equal to header.
	   In this case, free_block->header.size will be > 0. */
	free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
	if (SLJIT_UNLIKELY(!free_block->header.size)) {
		free_block->size += header->size;
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}
	else {
		free_block = (struct free_block*)header;
		sljit_insert_free_block(free_block, header->size);
	}

	header = AS_BLOCK_HEADER(free_block, free_block->size);
	if (SLJIT_UNLIKELY(!header->size)) {
		free_block->size += ((struct free_block*)header)->size;
		sljit_remove_free_block((struct free_block*)header);
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}

	/* The whole chunk is free. */
	if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
		/* Release the chunk only if at least (allocated_size / 2) of free
		   space still remains afterwards. */
		if (total_size - free_block->size > (allocated_size * 3 / 2)) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
	}

	SLJIT_ALLOCATOR_UNLOCK();
}
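
/*
   Numeric illustration of the release heuristic above (hypothetical
   numbers): with total_size == 256 KByte, allocated_size == 100 KByte and a
   fully free 64 KByte chunk, 256K - 64K == 192K > 150K (allocated_size * 3
   / 2), so the chunk is returned to the system, and roughly 92 KByte of
   free space is still kept for future allocations.
*/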

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
	struct free_block* free_block;
	struct free_block* next_free_block;

	SLJIT_ALLOCATOR_LOCK();

	free_block = free_blocks;
	while (free_block) {
		next_free_block = free_block->next;
		if (!free_block->header.prev_size &&
				AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
		free_block = next_free_block;
	}

	SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
	SLJIT_ALLOCATOR_UNLOCK();
}
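
/*
   End-to-end usage sketch (illustrative; the code emission step and any
   instruction cache maintenance are target specific and omitted here):

	void *buf = sljit_malloc_exec(4096);
	if (buf) {
		// ... copy generated machine code into buf ...
		sljit_free_exec(buf);
	}
	// Return any fully free chunks back to the operating system.
	sljit_free_unused_memory_exec();
*/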