/* Copyright JS Foundation and other contributors, http://js.foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Heap implementation
 */

#include "ecma-gc.h"
#include "jcontext.h"
#include "jmem.h"
#include "jrt-bit-fields.h"
#include "jrt-libc-includes.h"

#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"

#ifdef _WIN32
# ifdef _WIN64
#  define PRI_SIZET "llu"
#  define MSG_SIZE_TYPE unsigned long long
# else /* !_WIN64 */
#  define PRI_SIZET "lu"
#  define MSG_SIZE_TYPE unsigned long
# endif /* _WIN64 */
#else /* !_WIN32 */
# define PRI_SIZET "zu"
# define MSG_SIZE_TYPE size_t
#endif /* _WIN32 */

/** \addtogroup mem Memory allocation
 * @{
 *
 * \addtogroup heap Heap
 * @{
 */

#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
/**
 * End of list marker.
 */
#define JMEM_HEAP_END_OF_LIST ((uint32_t) 0xffffffff)

/**
 * @{
 */
#ifdef ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY
/* In this case we simply store the pointer, since it fits anyway. */
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (u))
#else /* !ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY */
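/* Otherwise a full pointer is not guaranteed to fit, so the free list stores
 * 32-bit offsets measured from the start of the heap area instead. */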
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - JERRY_HEAP_CONTEXT (area)))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (JERRY_HEAP_CONTEXT (area) + (u)))
#endif /* ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY */
/**
 * @}
 */

/**
 * Get end of region
 *
 * @return pointer to the end of the region
 */
static inline jmem_heap_free_t * JERRY_ATTR_ALWAYS_INLINE JERRY_ATTR_PURE
jmem_heap_get_region_end (jmem_heap_free_t *curr_p) /**< current region */
{
  return (jmem_heap_free_t *) ((uint8_t *) curr_p + curr_p->size);
} /* jmem_heap_get_region_end */
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */

/**
 * Startup initialization of heap
 */
void
jmem_heap_init (void)
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
#if !ENABLED (JERRY_CPOINTER_32_BIT)
  /* the maximum heap size for 16bit compressed pointers should be 512K */
  JERRY_ASSERT (((UINT16_MAX + 1) << JMEM_ALIGNMENT_LOG) >= JMEM_HEAP_SIZE);
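  /* (e.g. with the usual JMEM_ALIGNMENT_LOG of 3, (UINT16_MAX + 1) << 3 is 512 KiB) */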
#endif /* !ENABLED (JERRY_CPOINTER_32_BIT) */
  JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0);

  JERRY_CONTEXT (jmem_heap_limit) = CONFIG_GC_LIMIT;

  jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area);

  region_p->size = JMEM_HEAP_AREA_SIZE;
  region_p->next_offset = JMEM_HEAP_END_OF_LIST;

  JERRY_HEAP_CONTEXT (first).size = 0;
  JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);

  JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first);

  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_SIZE);

#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */

/**
 * Finalize heap
 */
void
jmem_heap_finalize (void)
{
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) == 0);
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), JMEM_HEAP_SIZE);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_finalize */

/**
 * Allocation of memory region.
 *
 * See also:
 *          jmem_heap_alloc_block
 *
 * @return pointer to allocated memory block - if allocation is successful,
 *         NULL - if there is not enough memory.
 */
static void * JERRY_ATTR_HOT
jmem_heap_alloc (const size_t size) /**< size of requested block */
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* Align size. */
  const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
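  /* (e.g. assuming the usual 8 byte JMEM_ALIGNMENT, a 10 byte request is rounded up to 16 bytes) */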
  jmem_heap_free_t *data_space_p = NULL;

  JMEM_VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  /* Fast path for 8 byte chunks, first region is guaranteed to be sufficient. */
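  /* (Every region on the free list is at least JMEM_ALIGNMENT bytes, so the
   * head of the list can always serve a one chunk request.) */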
  if (required_size == JMEM_ALIGNMENT
      && JERRY_LIKELY (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_END_OF_LIST))
  {
    data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));

    JMEM_VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
    JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT;

    if (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
    }

    if (data_space_p->size == JMEM_ALIGNMENT)
    {
      JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset;
    }
    else
    {
      JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);

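      /* The region is larger than one chunk: carve off the first chunk and keep
       * the remainder on the free list ('+ 1' below advances the pointer by one
       * jmem_heap_free_t header, i.e. one aligned chunk). */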
      jmem_heap_free_t *remaining_p;
      remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1;

      JMEM_VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
      remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
      remaining_p->next_offset = data_space_p->next_offset;
      JMEM_VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

      JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
    }

    JMEM_VALGRIND_NOACCESS_SPACE (data_space_p, sizeof (jmem_heap_free_t));

    if (JERRY_UNLIKELY (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
    {
      JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    }
  }
  /* Slow path for larger regions. */
  else
  {
    uint32_t current_offset = JERRY_HEAP_CONTEXT (first).next_offset;
    jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first);

    while (JERRY_LIKELY (current_offset != JMEM_HEAP_END_OF_LIST))
    {
      jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (current_offset);
      JERRY_ASSERT (jmem_is_heap_pointer (current_p));
      JMEM_VALGRIND_DEFINED_SPACE (current_p, sizeof (jmem_heap_free_t));

      const uint32_t next_offset = current_p->next_offset;
      JERRY_ASSERT (next_offset == JMEM_HEAP_END_OF_LIST
                    || jmem_is_heap_pointer (JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset)));

      if (current_p->size >= required_size)
      {
        /* Region is sufficiently big, store address. */
        data_space_p = current_p;

        /* Region was larger than necessary. */
        if (current_p->size > required_size)
        {
          /* Get address of remaining space. */
          jmem_heap_free_t *const remaining_p = (jmem_heap_free_t *) ((uint8_t *) current_p + required_size);

          /* Update metadata. */
          JMEM_VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
          remaining_p->size = current_p->size - (uint32_t) required_size;
          remaining_p->next_offset = next_offset;
          JMEM_VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

          /* Update list. */
          JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
          JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }
        /* Block is an exact fit. */
        else
        {
          /* Remove the region from the list. */
          JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = next_offset;
          JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }

        JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

        /* Found enough space. */
        JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

        while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
        {
          JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
        }

        break;
      }

      JMEM_VALGRIND_NOACCESS_SPACE (current_p, sizeof (jmem_heap_free_t));
      /* Next in list. */
      prev_p = current_p;
      current_offset = next_offset;
    }
  }

  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  JERRY_ASSERT ((uintptr_t) data_space_p % JMEM_ALIGNMENT == 0);
  JMEM_VALGRIND_MALLOCLIKE_SPACE (data_space_p, size);

  return (void *) data_space_p;
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_CONTEXT (jmem_heap_allocated_size) += size;

  while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
  }

  return malloc (size);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_alloc */

/**
 * Allocation of memory block, reclaiming memory if the request cannot be fulfilled.
 *
 * Note:
 *      Each failed allocation attempt tries to reclaim memory with an increasing pressure,
 *      up to 'max_pressure', or until a sufficient memory block is found. When JMEM_PRESSURE_FULL
 *      is reached, the engine is terminated with ERR_OUT_OF_MEMORY. The `max_pressure` argument
 *      can be used to limit the maximum pressure, and prevent the engine from terminating.
 *
 * @return NULL, if the required memory size is 0 or there is not enough memory,
 *         pointer to the allocated memory block, if allocation is successful
 */
static void *
jmem_heap_gc_and_alloc_block (const size_t size, /**< required memory size */
                              jmem_pressure_t max_pressure) /**< pressure limit */
{
  if (JERRY_UNLIKELY (size == 0))
  {
    return NULL;
  }

  jmem_pressure_t pressure = JMEM_PRESSURE_NONE;

#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC)
  if (JERRY_CONTEXT (jmem_heap_allocated_size) + size >= JERRY_CONTEXT (jmem_heap_limit))
#endif /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */
  {
    pressure = JMEM_PRESSURE_LOW;
    ecma_free_unused_memory (pressure);
  }

  void *data_space_p = jmem_heap_alloc (size);

  /* cppcheck-suppress memleak */
  while (JERRY_UNLIKELY (data_space_p == NULL) && JERRY_LIKELY (pressure < max_pressure))
  {
    pressure++;
    ecma_free_unused_memory (pressure);
    data_space_p = jmem_heap_alloc (size);
  }

  return data_space_p;
} /* jmem_heap_gc_and_alloc_block */

/**
 * Internal method for allocating a memory block.
 */
inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block_internal (const size_t size) /**< required memory size */
{
  return jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_FULL);
} /* jmem_heap_alloc_block_internal */

/**
 * Allocation of memory block, reclaiming unused memory if there is not enough.
 *
 * Note:
 *      If a sufficiently sized block can't be found, the engine will be terminated with ERR_OUT_OF_MEMORY.
 *
 * @return NULL, if the required memory size is 0
 *         pointer to allocated memory block, otherwise
 */
extern inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block (const size_t size) /**< required memory size */
{
  void *block_p = jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_FULL);
  JMEM_HEAP_STAT_ALLOC (size);
  return block_p;
} /* jmem_heap_alloc_block */
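
/* Illustrative usage sketch (not part of the engine): the allocator does not
 * record block sizes on behalf of its callers, so the same size must be passed
 * back when the block is released:
 *
 *   uint8_t *buffer_p = (uint8_t *) jmem_heap_alloc_block (24);
 *   ... use the 24 byte buffer ...
 *   jmem_heap_free_block (buffer_p, 24);
 */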

/**
 * Allocation of memory block, reclaiming unused memory if there is not enough.
 *
 * Note:
 *      If a sufficiently sized block can't be found, NULL will be returned.
 *
 * @return NULL, if the required memory size is 0
 *         also NULL, if the allocation has failed
 *         pointer to the allocated memory block, otherwise
 */
inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block_null_on_error (const size_t size) /**< required memory size */
{
  void *block_p = jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_HIGH);

#if ENABLED (JERRY_MEM_STATS)
  if (block_p != NULL)
  {
    JMEM_HEAP_STAT_ALLOC (size);
  }
#endif /* ENABLED (JERRY_MEM_STATS) */

  return block_p;
} /* jmem_heap_alloc_block_null_on_error */

#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
/**
 * Finds the block in the free block list which precedes the argument block
 *
 * @return pointer to the preceding block
 */
static jmem_heap_free_t *
jmem_heap_find_prev (const jmem_heap_free_t * const block_p) /**< which memory block's predecessor we're looking for */
{
  const jmem_heap_free_t *prev_p;

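  /* The skip pointer caches a recently visited free-list position, so when the
   * target block lies after it the search can start there instead of at the
   * list head. */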
  if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p))
  {
    prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p);
  }
  else
  {
    prev_p = &JERRY_HEAP_CONTEXT (first);
  }

  JERRY_ASSERT (jmem_is_heap_pointer (block_p));
  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  /* Find position of region in the list. */
  while (prev_p->next_offset < block_offset)
  {
    const jmem_heap_free_t * const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (next_p));

    JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
    JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
    prev_p = next_p;
  }

  JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  return (jmem_heap_free_t *) prev_p;
} /* jmem_heap_find_prev */

/**
 * Inserts the block into the free chain after a specified block.
 *
 * Note:
 *     'jmem_heap_find_prev' can and should be used to find the previous free block
 */
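/* Coalescing sketch: a block being inserted that touches a neighbouring free
 * region is merged with it, so adjacent free regions do not stay fragmented:
 *
 *   [ prev free ][ inserted ][ next free ]  ->  [ one merged free region ]
 */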
static void
jmem_heap_insert_block (jmem_heap_free_t *block_p, /**< block to insert */
                        jmem_heap_free_t *prev_p, /**< the free block after which to insert 'block_p' */
                        const size_t size) /**< size of the inserted block */
{
  JERRY_ASSERT ((uintptr_t) block_p % JMEM_ALIGNMENT == 0);
  JERRY_ASSERT (size % JMEM_ALIGNMENT == 0);

  JMEM_VALGRIND_NOACCESS_SPACE (block_p, size);

  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  jmem_heap_free_t *next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
  JMEM_VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  /* Update prev. */
  if (jmem_heap_get_region_end (prev_p) == block_p)
  {
    /* Can be merged. */
    prev_p->size += (uint32_t) size;
    JMEM_VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
    block_p = prev_p;
  }
  else
  {
    block_p->size = (uint32_t) size;
    prev_p->next_offset = block_offset;
  }

  /* Update next. */
  if (jmem_heap_get_region_end (block_p) == next_p)
  {
    /* Can be merged. */
    block_p->size += next_p->size;
    block_p->next_offset = next_p->next_offset;
  }
  else
  {
    block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
  }

  JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

  JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
} /* jmem_heap_insert_block */
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */

/**
 * Internal method for freeing a memory block.
 */
void JERRY_ATTR_HOT
jmem_heap_free_block_internal (void *ptr, /**< pointer to beginning of data space of the block */
                               const size_t size) /**< size of allocated region */
{
  JERRY_ASSERT (size > 0);
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0);

#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* checking that ptr points to the heap */
  JERRY_ASSERT (jmem_is_heap_pointer (ptr));
  JERRY_ASSERT ((uintptr_t) ptr % JMEM_ALIGNMENT == 0);

  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;

  jmem_heap_free_t *const block_p = (jmem_heap_free_t *) ptr;
  jmem_heap_free_t *const prev_p = jmem_heap_find_prev (block_p);
  jmem_heap_insert_block (block_p, prev_p, aligned_size);

  JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size;

  JMEM_VALGRIND_FREELIKE_SPACE (ptr);
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_CONTEXT (jmem_heap_allocated_size) -= size;
  free (ptr);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
  }

  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
} /* jmem_heap_free_block_internal */

/**
 * Reallocates the memory region pointed to by 'ptr', changing the size of the allocated region.
 *
 * @return pointer to the reallocated region
 */
void * JERRY_ATTR_HOT
jmem_heap_realloc_block (void *ptr, /**< memory region to reallocate */
                         const size_t old_size, /**< current size of the region */
                         const size_t new_size) /**< desired new size */
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JERRY_ASSERT (jmem_is_heap_pointer (ptr));
  JERRY_ASSERT ((uintptr_t) ptr % JMEM_ALIGNMENT == 0);
  JERRY_ASSERT (old_size != 0);
  JERRY_ASSERT (new_size != 0);

  jmem_heap_free_t * const block_p = (jmem_heap_free_t *) ptr;
  const size_t aligned_new_size = (new_size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
  const size_t aligned_old_size = (old_size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;

  if (aligned_old_size == aligned_new_size)
  {
    JMEM_VALGRIND_RESIZE_SPACE (block_p, old_size, new_size);
    JMEM_HEAP_STAT_FREE (old_size);
    JMEM_HEAP_STAT_ALLOC (new_size);
    return block_p;
  }

  if (aligned_new_size < aligned_old_size)
  {
    JMEM_VALGRIND_RESIZE_SPACE (block_p, old_size, new_size);
    JMEM_HEAP_STAT_FREE (old_size);
    JMEM_HEAP_STAT_ALLOC (new_size);
    jmem_heap_insert_block ((jmem_heap_free_t *) ((uint8_t *) block_p + aligned_new_size),
                            jmem_heap_find_prev (block_p),
                            aligned_old_size - aligned_new_size);

    JERRY_CONTEXT (jmem_heap_allocated_size) -= (aligned_old_size - aligned_new_size);
    while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
    }

    return block_p;
  }

  void *ret_block_p = NULL;
  const size_t required_size = aligned_new_size - aligned_old_size;
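  /* 'required_size' is the number of extra aligned bytes needed to grow the
   * block in place; the code below first tries to take them from an adjacent
   * free region before falling back to allocate-and-copy. */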

#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC)
  if (JERRY_CONTEXT (jmem_heap_allocated_size) + required_size >= JERRY_CONTEXT (jmem_heap_limit))
#endif /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */
  {
    ecma_free_unused_memory (JMEM_PRESSURE_LOW);
  }

  jmem_heap_free_t *prev_p = jmem_heap_find_prev (block_p);
  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  jmem_heap_free_t * const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);

  /* Check if block can be extended at the end */
  if (((jmem_heap_free_t *) ((uint8_t *) block_p + aligned_old_size)) == next_p)
  {
    JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

    if (required_size <= next_p->size)
    {
      /* Block can be extended, update the list. */
      if (required_size == next_p->size)
      {
        prev_p->next_offset = next_p->next_offset;
      }
      else
      {
        jmem_heap_free_t *const new_next_p = (jmem_heap_free_t *) ((uint8_t *) next_p + required_size);
        JMEM_VALGRIND_DEFINED_SPACE (new_next_p, sizeof (jmem_heap_free_t));
        new_next_p->next_offset = next_p->next_offset;
        new_next_p->size = (uint32_t) (next_p->size - required_size);
        JMEM_VALGRIND_NOACCESS_SPACE (new_next_p, sizeof (jmem_heap_free_t));
        prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (new_next_p);
      }

      /* next_p will be marked as undefined space. */
      JMEM_VALGRIND_RESIZE_SPACE (block_p, old_size, new_size);
      ret_block_p = block_p;
    }
    else
    {
      JMEM_VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
    }

    JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  }
  /*
   * Check if block can be extended at the front.
   * This is less optimal because we need to copy the data, but still better than allocating a new block.
   */
  else if (jmem_heap_get_region_end (prev_p) == block_p)
  {
    if (required_size <= prev_p->size)
    {
      if (required_size == prev_p->size)
      {
        JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        prev_p = jmem_heap_find_prev (prev_p);
        JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
        prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
      }
      else
      {
        prev_p->size = (uint32_t) (prev_p->size - required_size);
      }

      JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));

      ret_block_p = (uint8_t *) block_p - required_size;

      /* Mark the new block as undefined so that we are able to write to it. */
      JMEM_VALGRIND_UNDEFINED_SPACE (ret_block_p, old_size);
      /* The blocks are likely to overlap, so mark the old block as defined memory again. */
      JMEM_VALGRIND_DEFINED_SPACE (block_p, old_size);
      memmove (ret_block_p, block_p, old_size);

      JMEM_VALGRIND_FREELIKE_SPACE (block_p);
      JMEM_VALGRIND_MALLOCLIKE_SPACE (ret_block_p, new_size);
      JMEM_VALGRIND_DEFINED_SPACE (ret_block_p, old_size);
    }
    else
    {
      JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
    }
  }

  if (ret_block_p != NULL)
  {
    /* Managed to extend the block. Update memory usage and the skip pointer. */
    JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;
    JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

    while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
    }
  }
  else
  {
    /* Could not extend block. Allocate new region and copy the data. */
    /* jmem_heap_alloc_block_internal will adjust the allocated_size, but insert_block will not,
     * so we reduce it here first to keep the limit calculation consistent. */
    JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_old_size;
    ret_block_p = jmem_heap_alloc_block_internal (new_size);

    /* jmem_heap_alloc_block_internal may trigger garbage collection, which can create new free blocks
     * in the heap structure, so we need to look up the previous block again. */
    prev_p = jmem_heap_find_prev (block_p);

    memcpy (ret_block_p, block_p, old_size);
    jmem_heap_insert_block (block_p, prev_p, aligned_old_size);
    /* jmem_heap_alloc_block_internal will call JMEM_VALGRIND_MALLOCLIKE_SPACE */
    JMEM_VALGRIND_FREELIKE_SPACE (block_p);
  }

  JMEM_HEAP_STAT_FREE (old_size);
  JMEM_HEAP_STAT_ALLOC (new_size);
  return ret_block_p;
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  const size_t required_size = new_size - old_size;
#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC)
  if (JERRY_CONTEXT (jmem_heap_allocated_size) + required_size >= JERRY_CONTEXT (jmem_heap_limit))
#endif /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */
  {
    ecma_free_unused_memory (JMEM_PRESSURE_LOW);
  }

  JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

  while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
  }

  while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
  }

  JMEM_HEAP_STAT_FREE (old_size);
  JMEM_HEAP_STAT_ALLOC (new_size);
  return realloc (ptr, new_size);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_realloc_block */

/**
 * Free memory block
 */
extern inline void JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
                      const size_t size) /**< size of allocated region */
{
  jmem_heap_free_block_internal (ptr, size);
  JMEM_HEAP_STAT_FREE (size);
} /* jmem_heap_free_block */

#ifndef JERRY_NDEBUG
/**
 * Check whether the pointer points to the heap
 *
 * Note:
 *      the routine should be used only for assertion checks
 *
 * @return true - if pointer points to the heap,
 *         false - otherwise
 */
bool
jmem_is_heap_pointer (const void *pointer) /**< pointer */
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  return ((uint8_t *) pointer >= JERRY_HEAP_CONTEXT (area)
          && (uint8_t *) pointer <= (JERRY_HEAP_CONTEXT (area) + JMEM_HEAP_AREA_SIZE));
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_UNUSED (pointer);
  return true;
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_is_heap_pointer */
#endif /* !JERRY_NDEBUG */

#if ENABLED (JERRY_MEM_STATS)
/**
 * Get heap memory usage statistics
 */
void
jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats */
{
  JERRY_ASSERT (out_heap_stats_p != NULL);

  *out_heap_stats_p = JERRY_CONTEXT (jmem_heap_stats);
} /* jmem_heap_get_stats */

/**
 * Print heap memory usage statistics
 */
void
jmem_heap_stats_print (void)
{
  jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);

  JERRY_DEBUG_MSG ("Heap stats:\n");
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JERRY_DEBUG_MSG ("  Heap size = %"PRI_SIZET" bytes\n",
                   (MSG_SIZE_TYPE)(heap_stats->size));
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_DEBUG_MSG ("  Allocated = %"PRI_SIZET" bytes\n"
                   "  Peak allocated = %"PRI_SIZET" bytes\n"
                   "  Waste = %"PRI_SIZET" bytes\n"
                   "  Peak waste = %"PRI_SIZET" bytes\n"
                   "  Allocated byte code data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated byte code data = %"PRI_SIZET" bytes\n"
                   "  Allocated string data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated string data = %"PRI_SIZET" bytes\n"
                   "  Allocated object data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated object data = %"PRI_SIZET" bytes\n"
                   "  Allocated property data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated property data = %"PRI_SIZET" bytes\n",
                   (MSG_SIZE_TYPE)(heap_stats->allocated_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_allocated_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->waste_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_waste_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->byte_code_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_byte_code_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->string_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_string_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->object_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_object_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->property_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_property_bytes));
} /* jmem_heap_stats_print */

/**
 * Initialize the heap memory usage statistics accounting structure
 */
void
jmem_heap_stat_init (void)
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JERRY_CONTEXT (jmem_heap_stats).size = JMEM_HEAP_AREA_SIZE;
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_stat_init */

/**
 * Account allocation
 */
void
jmem_heap_stat_alloc (size_t size) /**< Size of allocated block */
{
  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
  const size_t waste_bytes = aligned_size - size;
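  /* 'waste_bytes' is the padding introduced by alignment rounding, e.g. with
   * 8 byte alignment a 14 byte allocation wastes 2 bytes when rounded up to 16. */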

  jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);

  heap_stats->allocated_bytes += aligned_size;
  heap_stats->waste_bytes += waste_bytes;

  if (heap_stats->allocated_bytes > heap_stats->peak_allocated_bytes)
  {
    heap_stats->peak_allocated_bytes = heap_stats->allocated_bytes;
  }

  if (heap_stats->waste_bytes > heap_stats->peak_waste_bytes)
  {
    heap_stats->peak_waste_bytes = heap_stats->waste_bytes;
  }
} /* jmem_heap_stat_alloc */

/**
 * Account freeing
 */
void
jmem_heap_stat_free (size_t size) /**< Size of freed block */
{
  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
  const size_t waste_bytes = aligned_size - size;

  jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);

  heap_stats->allocated_bytes -= aligned_size;
  heap_stats->waste_bytes -= waste_bytes;
} /* jmem_heap_stat_free */

#endif /* ENABLED (JERRY_MEM_STATS) */

/**
 * @}
 * @}
 */