/* Copyright JS Foundation and other contributors, http://js.foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Heap implementation
 */

#include "ecma-gc.h"
#include "jcontext.h"
#include "jmem.h"
#include "jrt-bit-fields.h"
#include "jrt-libc-includes.h"

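/*
 * Default no-op hooks for an external small-object allocator. These stubs appear to be
 * override points for a platform-provided pool allocator (an assumption based on how
 * jmem_heap_alloc and jmem_heap_free_block_internal consult them below): returning
 * NULL / false here keeps every allocation on the regular JerryScript heap.
 */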
void *JerryHeapMalloc (uint32_t size)
{
  return NULL;
}

bool JerryHeapFree (void *addr)
{
  return false;
}

void JerryHeapInit (void) {}

#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"

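/*
 * PRI_SIZET / MSG_SIZE_TYPE provide a portable printf format and cast type for the size
 * values printed by jmem_heap_stats_print below.
 */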
#ifdef _WIN32
#  ifdef _WIN64
#    define PRI_SIZET "lu"
#    define MSG_SIZE_TYPE unsigned long
#  else
#    define PRI_SIZET "zu"
#    define MSG_SIZE_TYPE size_t
#  endif
#else
#  define PRI_SIZET "zu"
#  define MSG_SIZE_TYPE size_t
#endif

/** \addtogroup mem Memory allocation
 * @{
 *
 * \addtogroup heap Heap
 * @{
 */

#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
/**
 * End of list marker.
 */
#define JMEM_HEAP_END_OF_LIST ((uint32_t) 0xffffffff)

/**
 * @{
 */
#ifdef ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY
/* In this case we simply store the pointer, since it fits anyway. */
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (u))
#else /* !ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY */
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - JERRY_HEAP_CONTEXT (area)))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (JERRY_HEAP_CONTEXT (area) + (u)))
#endif /* ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY */
/**
 * @}
 */

/**
 * Get end of region
 *
 * @return pointer to the end of the region
 */
static inline jmem_heap_free_t * JERRY_ATTR_ALWAYS_INLINE JERRY_ATTR_PURE
jmem_heap_get_region_end (jmem_heap_free_t *curr_p) /**< current region */
{
  return (jmem_heap_free_t *) ((uint8_t *) curr_p + curr_p->size);
} /* jmem_heap_get_region_end */
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */

/**
 * Startup initialization of heap
 */
void
jmem_heap_init (void)
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
#if !ENABLED (JERRY_CPOINTER_32_BIT)
  /* the maximum heap size for 16bit compressed pointers should be 512K */
  JERRY_ASSERT (((UINT16_MAX + 1) << JMEM_ALIGNMENT_LOG) >= JMEM_HEAP_SIZE);
#endif /* !ENABLED (JERRY_CPOINTER_32_BIT) */
  JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0);

  JERRY_CONTEXT (jmem_heap_limit) = CONFIG_GC_LIMIT;

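  /* The single initial free region covers the entire heap area; the 'first' sentinel has
   * size 0 and simply points at it, so the free list is never empty. */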
  jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area);

  region_p->size = JMEM_HEAP_AREA_SIZE;
  region_p->next_offset = JMEM_HEAP_END_OF_LIST;

  JERRY_HEAP_CONTEXT (first).size = 0;
  JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);

  JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first);

  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_SIZE);

#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */

/**
 * Finalize heap
 */
void
jmem_heap_finalize (void)
{
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) == 0);
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), JMEM_HEAP_SIZE);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_finalize */

/**
 * Allocation of memory region.
 *
 * See also:
 *          jmem_heap_alloc_block
 *
 * @return pointer to allocated memory block - if allocation is successful,
 *         NULL - if there is not enough memory.
 */
static void * JERRY_ATTR_HOT
jmem_heap_alloc (const size_t size) /**< size of requested block */
{
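  /* Small blocks (more than 8 and at most 24 bytes) are first offered to JerryHeapMalloc;
   * this is presumably the hook for a platform pool allocator (see the stubs above). When
   * it returns NULL, allocation falls through to the regular JerryScript heap below. */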
  if (size > 8 && size <= 24)
  {
    void *data_space_p = JerryHeapMalloc (size);
    if (data_space_p != NULL)
    {
      JERRY_CONTEXT (jmem_heap_allocated_size) += size;
      while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
      {
        JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
      }
      return data_space_p;
    }
  }
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* Align size. */
  const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
  jmem_heap_free_t *data_space_p = NULL;

  JMEM_VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  /* Fast path for 8 byte chunks, first region is guaranteed to be sufficient. */
  if (required_size == JMEM_ALIGNMENT
      && JERRY_LIKELY (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_END_OF_LIST))
  {
    data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));

    JMEM_VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
    JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT;

    if (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
    }

    if (data_space_p->size == JMEM_ALIGNMENT)
    {
      JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset;
    }
    else
    {
      JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);

      jmem_heap_free_t *remaining_p;
      remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1;

      JMEM_VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
      remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
      remaining_p->next_offset = data_space_p->next_offset;
      JMEM_VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

      JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
    }

    JMEM_VALGRIND_NOACCESS_SPACE (data_space_p, sizeof (jmem_heap_free_t));

    if (JERRY_UNLIKELY (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
    {
      JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    }
  }
  /* Slow path for larger regions. */
  else
  {
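    /* First-fit walk over the singly linked free list: the first region that is large
     * enough is used, and any surplus is split off and kept in the list. */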
    uint32_t current_offset = JERRY_HEAP_CONTEXT (first).next_offset;
    jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first);

    while (JERRY_LIKELY (current_offset != JMEM_HEAP_END_OF_LIST))
    {
      jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (current_offset);
      JERRY_ASSERT (jmem_is_heap_pointer (current_p));
      JMEM_VALGRIND_DEFINED_SPACE (current_p, sizeof (jmem_heap_free_t));

      const uint32_t next_offset = current_p->next_offset;
      JERRY_ASSERT (next_offset == JMEM_HEAP_END_OF_LIST
                    || jmem_is_heap_pointer (JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset)));

      if (current_p->size >= required_size)
      {
        /* Region is sufficiently big, store address. */
        data_space_p = current_p;

        /* Region was larger than necessary. */
        if (current_p->size > required_size)
        {
          /* Get address of remaining space. */
          jmem_heap_free_t *const remaining_p = (jmem_heap_free_t *) ((uint8_t *) current_p + required_size);

          /* Update metadata. */
          JMEM_VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
          remaining_p->size = current_p->size - (uint32_t) required_size;
          remaining_p->next_offset = next_offset;
          JMEM_VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

          /* Update list. */
          JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
          JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }
        /* Block is an exact fit. */
        else
        {
          /* Remove the region from the list. */
          JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = next_offset;
          JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }

        JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

        /* Found enough space. */
        JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

        while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
        {
          JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
        }

        break;
      }

      JMEM_VALGRIND_NOACCESS_SPACE (current_p, sizeof (jmem_heap_free_t));
      /* Next in list. */
      prev_p = current_p;
      current_offset = next_offset;
    }
  }

  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  JERRY_ASSERT ((uintptr_t) data_space_p % JMEM_ALIGNMENT == 0);
  JMEM_VALGRIND_MALLOCLIKE_SPACE (data_space_p, size);

  return (void *) data_space_p;
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_CONTEXT (jmem_heap_allocated_size) += size;

  while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
  }

  return malloc (size);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_alloc */

/**
 * Allocation of memory block, reclaiming memory if the request cannot be fulfilled.
 *
 * Note:
 *    Each failed allocation attempt tries to reclaim memory with an increasing pressure,
 *    up to 'max_pressure', or until a sufficient memory block is found. When JMEM_PRESSURE_FULL
 *    is reached, the engine is terminated with ERR_OUT_OF_MEMORY. The `max_pressure` argument
 *    can be used to limit the maximum pressure, and prevent the engine from terminating.
 *
 * @return NULL, if the required memory size is 0 or there is not enough memory
 *         pointer to the allocated memory block, if allocation is successful
 */
static void *
jmem_heap_gc_and_alloc_block (const size_t size, /**< required memory size */
                              jmem_pressure_t max_pressure) /**< pressure limit */
{
  if (JERRY_UNLIKELY (size == 0))
  {
    return NULL;
  }

  jmem_pressure_t pressure = JMEM_PRESSURE_NONE;

#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC)
  if (JERRY_CONTEXT (jmem_heap_allocated_size) + size >= JERRY_CONTEXT (jmem_heap_limit))
#endif /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */
  {
    pressure = JMEM_PRESSURE_LOW;
    ecma_free_unused_memory (pressure);
  }

  void *data_space_p = jmem_heap_alloc (size);

  /* cppcheck-suppress memleak */
  while (JERRY_UNLIKELY (data_space_p == NULL) && JERRY_LIKELY (pressure < max_pressure))
  {
    pressure++;
    ecma_free_unused_memory (pressure);
    data_space_p = jmem_heap_alloc (size);
  }

  return data_space_p;
} /* jmem_heap_gc_and_alloc_block */

/**
 * Internal method for allocating a memory block.
 */
inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block_internal (const size_t size) /**< required memory size */
{
  return jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_FULL);
} /* jmem_heap_alloc_block_internal */

/**
 * Allocation of memory block, reclaiming unused memory if there is not enough.
 *
 * Note:
 *      If a sufficiently sized block can't be found, the engine will be terminated with ERR_OUT_OF_MEMORY.
 *
 * @return NULL, if the required memory size is 0
 *         pointer to allocated memory block, otherwise
 */
extern inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block (const size_t size) /**< required memory size */
{
  void *block_p = jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_FULL);
  JMEM_HEAP_STAT_ALLOC (size);
  return block_p;
} /* jmem_heap_alloc_block */
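
/* Illustrative usage sketch (not part of the engine sources): callers pass the same size to
 * the free function that they used for allocation, because the heap does not record the
 * size of allocated blocks on its own:
 *
 *   uint8_t *buffer_p = (uint8_t *) jmem_heap_alloc_block (64);
 *   // ... use the 64 byte block ...
 *   jmem_heap_free_block (buffer_p, 64);
 */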

/**
 * Allocation of memory block, reclaiming unused memory if there is not enough.
 *
 * Note:
 *      If a sufficiently sized block can't be found, NULL will be returned.
 *
 * @return NULL, if the required memory size is 0
 *         also NULL, if the allocation has failed
 *         pointer to the allocated memory block, otherwise
 */
inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block_null_on_error (const size_t size) /**< required memory size */
{
  void *block_p = jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_HIGH);

#if ENABLED (JERRY_MEM_STATS)
  if (block_p != NULL)
  {
    JMEM_HEAP_STAT_ALLOC (size);
  }
#endif /* ENABLED (JERRY_MEM_STATS) */

  return block_p;
} /* jmem_heap_alloc_block_null_on_error */

#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
/**
 * Finds the block in the free block list which precedes the argument block
 *
 * @return pointer to the preceding block
 */
static jmem_heap_free_t *
jmem_heap_find_prev (const jmem_heap_free_t * const block_p) /**< which memory block's predecessor we're looking for */
{
  const jmem_heap_free_t *prev_p;

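  /* Start the search from the skip pointer when the block lies after it, otherwise from the
   * list head; the skip pointer caches a recent list position to shorten this linear walk. */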
  if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p))
  {
    prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p);
  }
  else
  {
    prev_p = &JERRY_HEAP_CONTEXT (first);
  }

  JERRY_ASSERT (jmem_is_heap_pointer (block_p));
  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  /* Find position of region in the list. */
  while (prev_p->next_offset < block_offset)
  {
    const jmem_heap_free_t * const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (next_p));

    JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
    JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
    prev_p = next_p;
  }

  JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  return (jmem_heap_free_t *) prev_p;
} /* jmem_heap_find_prev */

/**
 * Inserts the block into the free chain after a specified block.
 *
 * Note:
 *     'jmem_heap_find_prev' can and should be used to find the previous free block
 */
static void
jmem_heap_insert_block (jmem_heap_free_t *block_p, /**< block to insert */
                        jmem_heap_free_t *prev_p, /**< the free block after which to insert 'block_p' */
                        const size_t size) /**< size of the inserted block */
{
  JERRY_ASSERT ((uintptr_t) block_p % JMEM_ALIGNMENT == 0);
  JERRY_ASSERT (size % JMEM_ALIGNMENT == 0);

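  /* The freed region is merged with the previous and/or next free region whenever they are
   * physically adjacent, so the free list never contains two touching entries. */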
  JMEM_VALGRIND_NOACCESS_SPACE (block_p, size);

  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  jmem_heap_free_t *next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
  JMEM_VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  /* Update prev. */
  if (jmem_heap_get_region_end (prev_p) == block_p)
  {
    /* Can be merged. */
    prev_p->size += (uint32_t) size;
    JMEM_VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
    block_p = prev_p;
  }
  else
  {
    block_p->size = (uint32_t) size;
    prev_p->next_offset = block_offset;
  }

  /* Update next. */
  if (jmem_heap_get_region_end (block_p) == next_p)
  {
    /* Can be merged. */
    block_p->size += next_p->size;
    block_p->next_offset = next_p->next_offset;
  }
  else
  {
    block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
  }

  JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

  JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
} /* jmem_heap_insert_block */
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */

/**
 * Internal method for freeing a memory block.
 */
void JERRY_ATTR_HOT
jmem_heap_free_block_internal (void *ptr, /**< pointer to beginning of data space of the block */
                               const size_t size) /**< size of allocated region */
{
  JERRY_ASSERT (size > 0);
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0);

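  /* Blocks owned by the external allocator are released through JerryHeapFree; when it
   * reports success, only the accounting is updated here and the heap free list is left
   * untouched. */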
  if (JerryHeapFree (ptr))
  {
    JERRY_CONTEXT (jmem_heap_allocated_size) -= size;

    while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
    }

    JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
    return;
  }

#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* checking that ptr points to the heap */
  JERRY_ASSERT (jmem_is_heap_pointer (ptr));
  JERRY_ASSERT ((uintptr_t) ptr % JMEM_ALIGNMENT == 0);

  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;

  jmem_heap_free_t *const block_p = (jmem_heap_free_t *) ptr;
  jmem_heap_free_t *const prev_p = jmem_heap_find_prev (block_p);
  jmem_heap_insert_block (block_p, prev_p, aligned_size);

  JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size;

  JMEM_VALGRIND_FREELIKE_SPACE (ptr);
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_CONTEXT (jmem_heap_allocated_size) -= size;
  free (ptr);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
  }

  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
} /* jmem_heap_free_block_internal */

/**
 * Reallocates the memory region pointed to by 'ptr', changing the size of the allocated region.
 *
 * @return pointer to the reallocated region,
 *         NULL - if the allocation failed (the old region is freed in this case as well)
 */
void * JERRY_ATTR_HOT
jmem_heap_realloc_block (void *ptr, /**< memory region to reallocate */
                         const size_t old_size, /**< current size of the region */
                         const size_t new_size) /**< desired new size */
{
  /* Only growing the region can push the allocated size over the limit; computing the
   * difference unconditionally would underflow size_t when the block shrinks. */
  if (new_size > old_size
      && JERRY_CONTEXT (jmem_heap_allocated_size) + (new_size - old_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    ecma_free_unused_memory (JMEM_PRESSURE_LOW);
  }

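  /* This implementation takes the simple route: allocate a new block, copy the smaller of
   * the two sizes, then release the old block. Note that the old block is freed even when
   * the new allocation fails and NULL is returned. */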
  void *new_buffer_p = jmem_heap_alloc_block (new_size);
  size_t copy_size = (old_size > new_size) ? new_size : old_size;

  if (new_buffer_p != NULL)
  {
    memcpy (new_buffer_p, ptr, copy_size);
    jmem_heap_free_block (ptr, old_size);
    return new_buffer_p;
  }

  jmem_heap_free_block (ptr, old_size);
  return NULL;
} /* jmem_heap_realloc_block */

/**
 * Free memory block
 */
extern inline void JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
                      const size_t size) /**< size of allocated region */
{
  jmem_heap_free_block_internal (ptr, size);
  JMEM_HEAP_STAT_FREE (size);
  return;
} /* jmem_heap_free_block */

#ifndef JERRY_NDEBUG
/**
 * Check whether the pointer points to the heap
 *
 * Note:
 *      the routine should be used only for assertion checks
 *
 * @return true - if pointer points to the heap,
 *         false - otherwise
 */
bool
jmem_is_heap_pointer (const void *pointer) /**< pointer */
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  return ((uint8_t *) pointer >= JERRY_HEAP_CONTEXT (area)
          && (uint8_t *) pointer <= (JERRY_HEAP_CONTEXT (area) + JMEM_HEAP_AREA_SIZE));
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_UNUSED (pointer);
  return true;
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_is_heap_pointer */
#endif /* !JERRY_NDEBUG */

#if ENABLED (JERRY_MEM_STATS)
/**
 * Get heap memory usage statistics
 */
void
jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats */
{
  JERRY_ASSERT (out_heap_stats_p != NULL);

  *out_heap_stats_p = JERRY_CONTEXT (jmem_heap_stats);
} /* jmem_heap_get_stats */

/**
 * Print heap memory usage statistics
 */
void
jmem_heap_stats_print (void)
{
  jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);

  JERRY_DEBUG_MSG ("Heap stats:\n");
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JERRY_DEBUG_MSG ("  Heap size = %"PRI_SIZET" bytes\n",
                   (MSG_SIZE_TYPE)(heap_stats->size));
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_DEBUG_MSG ("  Allocated = %"PRI_SIZET" bytes\n"
                   "  Peak allocated = %"PRI_SIZET" bytes\n"
                   "  Waste = %"PRI_SIZET" bytes\n"
                   "  Peak waste = %"PRI_SIZET" bytes\n"
                   "  Allocated byte code data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated byte code data = %"PRI_SIZET" bytes\n"
                   "  Allocated string data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated string data = %"PRI_SIZET" bytes\n"
                   "  Allocated object data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated object data = %"PRI_SIZET" bytes\n"
                   "  Allocated property data = %"PRI_SIZET" bytes\n"
                   "  Peak allocated property data = %"PRI_SIZET" bytes\n",
                   (MSG_SIZE_TYPE)(heap_stats->allocated_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_allocated_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->waste_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_waste_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->byte_code_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_byte_code_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->string_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_string_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->object_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_object_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->property_bytes),
                   (MSG_SIZE_TYPE)(heap_stats->peak_property_bytes));
} /* jmem_heap_stats_print */

/**
 * Initialize heap memory usage statistics account structure
 */
void
jmem_heap_stat_init (void)
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JERRY_CONTEXT (jmem_heap_stats).size = JMEM_HEAP_AREA_SIZE;
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_stat_init */

/**
 * Account allocation
 */
void
jmem_heap_stat_alloc (size_t size) /**< Size of allocated block */
{
  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
  const size_t waste_bytes = aligned_size - size;

  jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);

  heap_stats->allocated_bytes += aligned_size;
  heap_stats->waste_bytes += waste_bytes;

  if (heap_stats->allocated_bytes > heap_stats->peak_allocated_bytes)
  {
    heap_stats->peak_allocated_bytes = heap_stats->allocated_bytes;
  }

  if (heap_stats->waste_bytes > heap_stats->peak_waste_bytes)
  {
    heap_stats->peak_waste_bytes = heap_stats->waste_bytes;
  }
} /* jmem_heap_stat_alloc */

/**
 * Account freeing
 */
void
jmem_heap_stat_free (size_t size) /**< Size of freed block */
{
  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
  const size_t waste_bytes = aligned_size - size;

  jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);

  heap_stats->allocated_bytes -= aligned_size;
  heap_stats->waste_bytes -= waste_bytes;
} /* jmem_heap_stat_free */

#endif /* ENABLED (JERRY_MEM_STATS) */

/**
 * @}
 * @}
 */