/* ----------------------------------------------------------------------------
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE   // for realpath() on Linux
#endif

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"   // _mi_prim_thread_id()

#include <string.h>      // memset, strlen (for mi_strdup)
#include <stdlib.h>      // malloc, abort

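// Note: `_ZSt15get_new_handlerv` is the Itanium-mangled name of `std::get_new_handler()`.
// The rename below appears intended to keep the weak definition of that symbol (near the
// end of this file) private and avoid clashing with the C++ runtime.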
#define _ZSt15get_new_handlerv _Py__ZSt15get_new_handlerv

#define MI_IN_ALLOC_C
#include "alloc-override.c"
#undef MI_IN_ALLOC_C

// ------------------------------------------------------
// Allocation
// ------------------------------------------------------

#if (MI_DEBUG>0)
static inline void mi_debug_fill(mi_page_t* page, mi_block_t* block, int c, size_t size) {
  size_t offset = (size_t)page->debug_offset;
  if (offset < size) {
    memset((char*)block + offset, c, size - offset);
  }
}
#endif

// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
  mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
  mi_block_t* const block = page->free;
  if mi_unlikely(block == NULL) {
    return _mi_malloc_generic(heap, size, zero, 0);
  }
  mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
  // pop from the free list
  page->used++;
  page->free = mi_block_next(page, block);
  mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
  #if MI_DEBUG>3
  if (page->free_is_zero) {
    mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
  }
  #endif

  // allow use of the block internally
  // note: when tracking we need to avoid ever touching the MI_PADDING since
  // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`)
  mi_track_mem_undefined(block, mi_page_usable_block_size(page));

  // zero the block? note: we need to zero the full block size (issue #63)
  if mi_unlikely(zero) {
    mi_assert_internal(page->xblock_size != 0); // do not call with zeroing for huge blocks (see _mi_malloc_generic)
    mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
    if (page->free_is_zero) {
      block->next = 0;
      mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
    }
    else {
      _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
    }
  }

#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
  if (!zero && !mi_page_is_huge(page)) {
    mi_debug_fill(page, block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
  }
#elif (MI_SECURE!=0)
  if (!zero) { block->next = 0; } // don't leak internal data
#endif

#if (MI_STAT>0)
  const size_t bsize = mi_page_usable_block_size(page);
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_increase(heap, normal, bsize);
    mi_heap_stat_counter_increase(heap, normal_count, 1);
#if (MI_STAT>1)
    const size_t bin = _mi_bin(bsize);
    mi_heap_stat_increase(heap, normal_bins[bin], 1);
#endif
  }
#endif

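// Padding layout (when MI_PADDING is enabled), as set up below: the mi_padding_t struct
// sits at `block + mi_page_usable_block_size(page)`, and `delta` records how many bytes
// lie between the end of the requested allocation and that struct:
//
//   block                    block + (size - MI_PADDING_SIZE)     block + usable_block_size
//   |--- requested bytes -----|--- delta fill bytes ---------------|-- { canary, delta } --|
//
// The canary is the block pointer encoded with the page keys, so a write past the requested
// size can be detected when the block is freed (see `mi_verify_padding` further down).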
#if MI_PADDING // && !MI_TRACK_ENABLED
  mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
  ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
  #if (MI_DEBUG>=2)
  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
  #endif
  mi_track_mem_defined(padding,sizeof(mi_padding_t));  // note: re-enable since mi_page_usable_block_size may set noaccess
  padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
  padding->delta  = (uint32_t)(delta);
  #if MI_PADDING_CHECK
  if (!mi_page_is_huge(page)) {
    uint8_t* fill = (uint8_t*)padding - delta;
    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
    for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
  }
  #endif
#endif

  return block;
}

static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  mi_assert(heap != NULL);
  #if MI_DEBUG
  const uintptr_t tid = _mi_thread_id();
  mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
  #endif
  mi_assert(size <= MI_SMALL_SIZE_MAX);
  #if (MI_PADDING)
  if (size == 0) { size = sizeof(void*); }
  #endif
  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
  void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
  mi_track_malloc(p,size,zero);
  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif
  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));
  }
  #endif
  return p;
}

// allocate a small block
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  return mi_heap_malloc_small_zero(heap, size, false);
}

mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
  return mi_heap_malloc_small(mi_prim_get_default_heap(), size);
}

// The main allocation function
extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
    mi_assert_internal(huge_alignment == 0);
    return mi_heap_malloc_small_zero(heap, size, zero);
  }
  else {
    mi_assert(heap!=NULL);
    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());   // heaps are thread local
    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment);  // note: size can overflow but it is detected in malloc_generic
    mi_track_malloc(p,size,zero);
    #if MI_STAT>1
    if (p != NULL) {
      if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
      mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
    }
    #endif
    #if MI_DEBUG>3
    if (p != NULL && zero) {
      mi_assert_expensive(mi_mem_is_zero(p, size));
    }
    #endif
    return p;
  }
}

extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  return _mi_heap_malloc_zero_ex(heap, size, zero, 0);
}

mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  return _mi_heap_malloc_zero(heap, size, false);
}

mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
  return mi_heap_malloc(mi_prim_get_default_heap(), size);
}

// zero initialized small block
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
  return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true);
}

mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  return _mi_heap_malloc_zero(heap, size, true);
}

mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
  return mi_heap_zalloc(mi_prim_get_default_heap(),size);
}


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so it is only enabled in secure mode (MI_SECURE>=4) or debug mode
// ------------------------------------------------------

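// The check is two-staged: `mi_check_is_double_free` does a cheap test on the decoded
// `next` field of the block (is it an aligned pointer into the same page, or NULL?),
// and only when that looks suspicious does the noinline `mi_check_is_double_freex`
// walk the page free lists to confirm an actual double free.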
#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  while (list != NULL) {
    if (elem==list) return true;
    list = mi_block_next(page, list);
  }
  return false;
}

static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
  // The decoded value is in the same page (or NULL).
  // Walk the free lists to verify positively if it is already freed
  if (mi_list_contains(page, page->free, block) ||
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    return true;
  }
  return false;
}

#define mi_track_page(page,access)  { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }

static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  bool is_double_free = false;
  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
  {
    // Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
    // (continue in separate function to improve code generation)
    is_double_free = mi_check_is_double_freex(page, block);
  }
  return is_double_free;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  return false;
}
#endif

// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if MI_PADDING // && !MI_TRACK_ENABLED
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  *delta = padding->delta;
  uint32_t canary = padding->canary;
  uintptr_t keys[2];
  keys[0] = page->keys[0];
  keys[1] = page->keys[1];
  bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
  return ok;
}

// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  padding->delta = (uint32_t)new_delta;
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
}
#else
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  return mi_page_usable_block_size(page);
}

void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  MI_UNUSED(min_size);
}
#endif

#if MI_PADDING && MI_PADDING_CHECK

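// Verify both the canary in the trailing mi_padding_t and the MI_DEBUG_PADDING fill
// bytes written at allocation time; on failure, report the offset of the first
// corrupted byte relative to the start of the block.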
static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  if (!mi_page_is_huge(page)) {
    uint8_t* fill = (uint8_t*)block + bsize - delta;
    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
    mi_track_mem_defined(fill, maxpad);
    for (size_t i = 0; i < maxpad; i++) {
      if (fill[i] != MI_DEBUG_PADDING) {
        *wrong = bsize - delta + i;
        ok = false;
        break;
      }
    }
    mi_track_mem_noaccess(fill, maxpad);
  }
  return ok;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

#else

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
}

#endif

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  #if (MI_STAT < 2)
  MI_UNUSED(block);
  #endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
  #if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
  #endif
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize);
    #if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
    #endif
  }
  else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif

#if MI_HUGE_PAGE_ABANDON
#if (MI_STAT>0)
// maintain stats for huge objects
static void mi_stat_huge_free(const mi_page_t* page) {
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
}
#else
static void mi_stat_huge_free(const mi_page_t* page) {
  MI_UNUSED(page);
}
#endif
#endif

// ------------------------------------------------------
// Free
// ------------------------------------------------------

// multi-threaded free (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
  // The padding check may access the non-thread-owned page for the key values.
  // That is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);
  _mi_padding_shrink(page, block, sizeof(mi_block_t));       // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection

  // huge pages are handled specially as they occupy an entire segment
  mi_segment_t* segment = _mi_page_segment(page);
  if (segment->kind == MI_SEGMENT_HUGE) {
    #if MI_HUGE_PAGE_ABANDON
    // huge page segments are always abandoned and can be freed immediately
    mi_stat_huge_free(page);
    _mi_segment_huge_page_free(segment, page, block);
    return;
    #else
    // since these are large, we reset the memory occupied by the page so it is
    // available to other threads (as the owning thread needs to actually free
    // the memory later).
    _mi_segment_huge_page_reset(segment, page, block);
    #endif
  }

  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN        // note: when tracking, cannot use mi_usable_size with multi-threading
  if (segment->kind != MI_SEGMENT_HUGE) {                  // not for huge segments as we just reset the content
    mi_debug_fill(page, block, MI_DEBUG_FREED, mi_usable_size(block));
  }
  #endif

  // Try to put the block on either the page-local thread free list, or the heap delayed free list.
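  // `xthread_free` packs the head of the page's thread-free list together with the
  // delayed-free state (see `mi_tf_block` and `mi_tf_delayed`); the CAS loop below
  // updates both in one atomic word.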
  mi_thread_free_t tfreex;
  bool use_delayed;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
    if mi_unlikely(use_delayed) {
      // unlikely: this only happens on the first concurrent free in a page that is in the full list
      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
    }
    else {
      // usual: directly add to page thread_free list
      mi_block_set_next(page, block, mi_tf_block(tfree));
      tfreex = mi_tf_set_block(tfree,block);
    }
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  if mi_unlikely(use_delayed) {
    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
    mi_assert_internal(heap != NULL);
    if (heap != NULL) {
      // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap,block,dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }

    // and reset the MI_DELAYED_FREEING flag
    tfree = mi_atomic_load_relaxed(&page->xthread_free);
    do {
      tfreex = tfree;
      mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
      tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
    } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
  }
}

// regular free
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
  // and push it on the free list
  //const size_t bsize = mi_page_block_size(page);
  if mi_likely(local) {
    // owning thread can free a block directly
    if mi_unlikely(mi_check_is_double_free(page, block)) return;
    mi_check_padding(page, block);
    #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
    if (!mi_page_is_huge(page)) {   // huge page content may be already decommitted
      mi_debug_fill(page, block, MI_DEBUG_FREED, mi_page_block_size(page));
    }
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    page->used--;
    if mi_unlikely(mi_page_all_free(page)) {
      _mi_page_retire(page);
    }
    else if mi_unlikely(mi_page_is_in_full(page)) {
      _mi_page_unfull(page);
    }
  }
  else {
    _mi_free_block_mt(page,block);
  }
}


// Adjust a block that was allocated aligned, to the actual start of the block in the page.
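// For example, an aligned allocation may return a pointer `p` that lies somewhere inside
// its block; subtracting `p`'s offset modulo the block size recovers the block start.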
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);
  const size_t diff   = (uint8_t*)p - _mi_page_start(segment, page, NULL);
  const size_t adjust = (diff % mi_page_block_size(page));
  return (mi_block_t*)((uintptr_t)p - adjust);
}


void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
  mi_stat_free(page, block);    // stat_free may access the padding
  mi_track_free_size(block, mi_page_usable_size_of(page,block));
  _mi_free_block(page, is_local, block);
}

// Get the segment data belonging to a pointer
// This is just a single `and` in assembly but does further checks in debug mode
// (and secure mode) if this was a valid pointer.
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
  MI_UNUSED(msg);
  mi_assert(p != NULL);

#if (MI_DEBUG>0)
  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
    _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
    return NULL;
  }
#endif

  mi_segment_t* const segment = _mi_ptr_segment(p);
  mi_assert_internal(segment != NULL);

#if 0 && (MI_DEBUG>0)
  if mi_unlikely(!mi_is_in_heap_region(p)) {
  #if (MI_INTPTR_SIZE == 8 && defined(__linux__))
    if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640)
  #else
    {
  #endif
      _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
        "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
      if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
        _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
      }
    }
  }
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
  if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
    return NULL;
  }
#endif

  return segment;
}

// Free a block
// fast path written carefully to prevent spilling on the stack
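// The common case handled inline below is a thread-local free of a block in a page that
// is neither full nor contains aligned blocks; everything else falls through to
// `_mi_free_generic`.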
void mi_free(void* p) mi_attr_noexcept
{
  if mi_unlikely(p == NULL) return;
  mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
  const bool          is_local= (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
  mi_page_t* const    page    = _mi_segment_page_of(segment, p);

  if mi_likely(is_local) {                       // thread-local free?
    if mi_likely(page->flags.full_aligned == 0)  // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
    {
      mi_block_t* const block = (mi_block_t*)p;
      if mi_unlikely(mi_check_is_double_free(page, block)) return;
      mi_check_padding(page, block);
      mi_stat_free(page, block);
      #if (MI_DEBUG>0) && !MI_TRACK_ENABLED  && !MI_TSAN
      mi_debug_fill(page, block, MI_DEBUG_FREED, mi_page_block_size(page));
      #endif
      mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster than mi_usable_size as we already know the page and that p is unaligned
      mi_block_set_next(page, block, page->local_free);
      page->local_free = block;
      if mi_unlikely(--page->used == 0) {   // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
        _mi_page_retire(page);
      }
    }
    else {
      // page is full or contains (inner) aligned blocks; use generic path
      _mi_free_generic(segment, page, true, p);
    }
  }
  else {
    // not thread-local; use generic path
    _mi_free_generic(segment, page, false, p);
  }
}

// return true if successful
bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  const mi_segment_t* const segment = _mi_ptr_segment(block);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
#ifndef Py_GIL_DISABLED
  // The GC traverses heaps of other threads, which can trigger this assert.
  mi_assert_internal(_mi_thread_id() == segment->thread_id);
#endif
  mi_page_t* const page = _mi_segment_page_of(segment, block);

  // Clear the no-delayed flag so delayed freeing is used again for this page.
  // This must be done before collecting the free lists on this page -- otherwise
  // some blocks may end up in the page `thread_free` list with no blocks in the
  // heap `thread_delayed_free` list, which may cause the page never to be freed!
  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
  if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never-delayed */)) {
    return false;
  }

  // collect all other non-local frees to ensure up-to-date `used` count
  _mi_page_free_collect(page, false);

  // and free the block (possibly freeing the page as well since used is updated)
  _mi_free_block(page, true, block);
  return true;
}

// Bytes available in a block
mi_decl_noinline static size_t mi_page_usable_aligned_size_of(const mi_segment_t* segment, const mi_page_t* page, const void* p) mi_attr_noexcept {
  const mi_block_t* block = _mi_page_ptr_unalign(segment, page, p);
  const size_t size = mi_page_usable_size_of(page, block);
  const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
  mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
  return (size - adjust);
}

static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
  if (p == NULL) return 0;
  const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
  const mi_page_t* const page = _mi_segment_page_of(segment, p);
  if mi_likely(!mi_page_has_aligned(page)) {
    const mi_block_t* block = (const mi_block_t*)p;
    return mi_page_usable_size_of(page, block);
  }
  else {
    // split out to separate routine for improved code generation
    return mi_page_usable_aligned_size_of(segment, page, p);
  }
}

mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
  return _mi_usable_size(p, "mi_usable_size");
}


// ------------------------------------------------------
// Allocation extensions
// ------------------------------------------------------

void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  MI_UNUSED_RELEASE(size);
  mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
  mi_free(p);
}

void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free_size(p,size);
}

void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free(p);
}

mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count,size,&total)) return NULL;
  return mi_heap_zalloc(heap,total);
}

mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_calloc(mi_prim_get_default_heap(),count,size);
}

// Uninitialized `calloc`
mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_malloc(heap, total);
}

mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_mallocn(mi_prim_get_default_heap(),count,size);
}

// Expand (or shrink) in place (or fail)
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
  #if MI_PADDING
  // we do not shrink/expand with padding enabled
  MI_UNUSED(p); MI_UNUSED(newsize);
  return NULL;
  #else
  if (p == NULL) return NULL;
  const size_t size = _mi_usable_size(p,"mi_expand");
  if (newsize > size) return NULL;
  return p; // it fits
  #endif
}

void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept {
  // if p == NULL then behave as malloc.
  // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
  // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
  const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
  if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
    mi_assert_internal(p!=NULL);
    // todo: do not track as the usable size is still the same in the free; adjust potential padding?
    // mi_track_resize(p,size,newsize)
    // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); }
    return p;  // reallocation still fits and not more than 50% waste
  }
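  // For example, with a usable size of 100 bytes, a realloc to 60 bytes returns `p`
  // unchanged (it still fits with at most 50% waste), while a realloc to 40 bytes
  // falls through and allocates a fresh block below.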
  void* newp = mi_heap_malloc(heap,newsize);
  if mi_likely(newp != NULL) {
    if (zero && newsize > size) {
      // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
      const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
      _mi_memzero((uint8_t*)newp + start, newsize - start);
    }
    else if (newsize == 0) {
      ((uint8_t*)newp)[0] = 0; // workaround for applications that expect zero-reallocation to be zero initialized (issue #725)
    }
    if mi_likely(p != NULL) {
      const size_t copysize = (newsize > size ? size : newsize);
      mi_track_mem_defined(p,copysize);  // _mi_usable_size may be too large for byte-precise memory tracking.
      _mi_memcpy(newp, p, copysize);
      mi_free(p); // only free the original pointer if successful
    }
  }
  return newp;
}

mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, false);
}

mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_realloc(heap, p, total);
}


// Reallocate but free `p` on errors
mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  void* newp = mi_heap_realloc(heap, p, newsize);
  if (newp==NULL && p!=NULL) mi_free(p);
  return newp;
}

mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, true);
}

mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_rezalloc(heap, p, total);
}


mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize);
}

mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size);
}

// Reallocate but free `p` on errors
mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize);
}

mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize);
}

mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size);
}



// ------------------------------------------------------
// strdup, strndup, and realpath
// ------------------------------------------------------

// `strdup` using mi_malloc
mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
  if (s == NULL) return NULL;
  size_t n = strlen(s);
  char* t = (char*)mi_heap_malloc(heap,n+1);
  if (t == NULL) return NULL;
  _mi_memcpy(t, s, n);
  t[n] = 0;
  return t;
}

mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
  return mi_heap_strdup(mi_prim_get_default_heap(), s);
}

// `strndup` using mi_malloc
mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
  if (s == NULL) return NULL;
  const char* end = (const char*)memchr(s, 0, n);  // find end of string in the first `n` characters (returns NULL if not found)
  const size_t m = (end != NULL ? (size_t)(end - s) : n);  // `m` is the minimum of `n` or the end-of-string
  mi_assert_internal(m <= n);
  char* t = (char*)mi_heap_malloc(heap, m+1);
  if (t == NULL) return NULL;
  _mi_memcpy(t, s, m);
  t[m] = 0;
  return t;
}

mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
  return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
}

#ifndef __wasi__
// `realpath` using mi_malloc
#ifdef _WIN32
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  // todo: use GetFullPathNameW to allow longer file names
  char buf[PATH_MAX];
  DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
  if (res == 0) {
    errno = GetLastError(); return NULL;
  }
  else if (res > PATH_MAX) {
    errno = EINVAL; return NULL;
  }
  else if (resolved_name != NULL) {
    return resolved_name;
  }
  else {
    return mi_heap_strndup(heap, buf, PATH_MAX);
  }
}
#else
/*
#include <unistd.h>  // pathconf
static size_t mi_path_max(void) {
  static size_t path_max = 0;
  if (path_max <= 0) {
    long m = pathconf("/",_PC_PATH_MAX);
    if (m <= 0) path_max = 4096;      // guess
    else if (m < 256) path_max = 256; // at least 256
    else path_max = m;
  }
  return path_max;
}
*/
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  if (resolved_name != NULL) {
    return realpath(fname,resolved_name);
  }
  else {
    char* rname = realpath(fname, NULL);
    if (rname == NULL) return NULL;
    char* result = mi_heap_strdup(heap, rname);
    free(rname);  // use regular free! (which may be redirected to our free but that's ok)
    return result;
  }
  /*
    const size_t n  = mi_path_max();
    char* buf = (char*)mi_malloc(n+1);
    if (buf == NULL) {
      errno = ENOMEM;
      return NULL;
    }
    char* rname  = realpath(fname,buf);
    char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
    mi_free(buf);
    return result;
  }
  */
}
#endif

mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
  return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name);
}
#endif

/*-------------------------------------------------------
C++ new and new_aligned
The standard requires calling into `get_new_handler` and
throwing the bad_alloc exception on failure. If we compile
with a C++ compiler we can implement this precisely. If we
use a C compiler we cannot throw a `bad_alloc` exception
but call `abort` instead (i.e. not returning).
-------------------------------------------------------*/

#ifdef __cplusplus
#include <new>
static bool mi_try_new_handler(bool nothrow) {
  #if defined(_MSC_VER) || (__cplusplus >= 201103L)
    std::new_handler h = std::get_new_handler();
  #else
    // pre-C++11: there is no `get_new_handler`, so read the current handler by
    // temporarily setting it to NULL and restoring it right away
    std::new_handler h = std::set_new_handler(NULL);
    std::set_new_handler(h);
  #endif
  if (h==NULL) {
    _mi_error_message(ENOMEM, "out of memory in 'new'");
    if (!nothrow) {
      throw std::bad_alloc();
    }
    return false;
  }
  else {
    h();
    return true;
  }
}
#else
typedef void (*std_new_handler_t)(void);

#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER)))  // exclude clang-cl, see issue #631
std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) {
  return NULL;
}
static std_new_handler_t mi_get_new_handler(void) {
  return _ZSt15get_new_handlerv();
}
#else
// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
static std_new_handler_t mi_get_new_handler() {
  return NULL;
}
#endif

static bool mi_try_new_handler(bool nothrow) {
  std_new_handler_t h = mi_get_new_handler();
  if (h==NULL) {
    _mi_error_message(ENOMEM, "out of memory in 'new'");
    if (!nothrow) {
      abort();  // cannot throw in plain C, use abort
    }
    return false;
  }
  else {
    h();
    return true;
  }
}
#endif

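// Out-of-memory path shared by the `new` variants below: keep invoking the installed
// new-handler (which may free up memory, or throw) and retry the allocation until it
// succeeds or no handler is installed.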
mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) {
  void* p = NULL;
  while(p == NULL && mi_try_new_handler(nothrow)) {
    p = mi_heap_malloc(heap,size);
  }
  return p;
}

static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
  return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow);
}


mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
  void* p = mi_heap_malloc(heap,size);
  if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false);
  return p;
}

mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
  return mi_heap_alloc_new(mi_prim_get_default_heap(), size);
}


mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
  size_t total;
  if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
    mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
    return NULL;
  }
  else {
    return mi_heap_alloc_new(heap,total);
  }
}

mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
  return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size);
}


mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
  void* p = mi_malloc(size);
  if mi_unlikely(p == NULL) return mi_try_new(size, true);
  return p;
}

mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
  void* p;
  do {
    p = mi_malloc_aligned(size, alignment);
  }
  while(p == NULL && mi_try_new_handler(false));
  return p;
}

mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
  void* p;
  do {
    p = mi_malloc_aligned(size, alignment);
  }
  while(p == NULL && mi_try_new_handler(true));
  return p;
}

mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
  void* q;
  do {
    q = mi_realloc(p, newsize);
  } while (q == NULL && mi_try_new_handler(false));
  return q;
}

mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
  size_t total;
  if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
    mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
    return NULL;
  }
  else {
    return mi_new_realloc(p, total);
  }
}

// ------------------------------------------------------
// ensure explicit external inline definitions are emitted!
// ------------------------------------------------------

#ifdef __cplusplus
void* _mi_externs[] = {
  (void*)&_mi_page_malloc,
  (void*)&_mi_heap_malloc_zero,
  (void*)&_mi_heap_malloc_zero_ex,
  (void*)&mi_malloc,
  (void*)&mi_malloc_small,
  (void*)&mi_zalloc_small,
  (void*)&mi_heap_malloc,
  (void*)&mi_heap_zalloc,
  (void*)&mi_heap_malloc_small,
  // (void*)&mi_heap_alloc_new,
  // (void*)&mi_heap_alloc_new_n
};
#endif