/*----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"  // mi_prim_get_default_heap

#include <string.h>  // memset, memcpy

#if defined(_MSC_VER) && (_MSC_VER < 1920)
#pragma warning(disable:4204)  // non-constant aggregate initializer
#endif

/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

// return `true` if ok, `false` to break
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);

// Visit all pages in a heap; returns `false` if break was called.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{
  if (heap==NULL || heap->page_count==0) return true;

  // visit all pages
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  size_t count = 0;
  #endif

  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_t* page = pq->first;
    while(page != NULL) {
      mi_page_t* next = page->next; // save next in case the page gets removed from the queue
      mi_assert_internal(mi_page_heap(page) == heap);
      #if MI_DEBUG>1
      count++;
      #endif
      if (!fn(heap, pq, page, arg1, arg2)) return false;
      page = next; // and continue
    }
  }
  mi_assert_internal(count == total);
  return true;
}

#if MI_DEBUG>=2
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(pq);
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}
#endif
#if MI_DEBUG>=3
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif



/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */

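// The kind of collection to perform, from weakest to strongest:
// - MI_NORMAL : collect what is cheap (migrate free lists, free empty pages).
// - MI_FORCE  : additionally purge caches and return freed memory to the OS
//               (and, on the main thread, reclaim abandoned segments).
// - MI_ABANDON: the owning thread is terminating; pages that still contain
//               live blocks are abandoned so other threads can reclaim them.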
typedef enum mi_collect_e {
  MI_NORMAL,
  MI_FORCE,
  MI_ABANDON
} mi_collect_t;


static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2) {
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.
    bool freed = _PyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
    if (!freed && collect == MI_ABANDON) {
      _mi_page_abandon(page, pq);
    }
  }
  else if (collect == MI_ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true; // don't break
}

static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
  return true; // don't break
}

static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  const bool force = collect >= MI_FORCE;
  _mi_deferred_free(heap, force);

  // gh-112532: we may be called from a thread that is not the owner of the heap
  bool is_main_thread = _mi_is_main_thread() && heap->thread_id == _mi_thread_id();

  // note: never reclaim on collect but leave it to threads that need storage to reclaim
  const bool force_main =
    #ifdef NDEBUG
      collect == MI_FORCE
    #else
      collect >= MI_FORCE
    #endif
      && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
    // if all memory is freed by now, all segments should be freed.
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == MI_ABANDON) {
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free all current thread delayed blocks.
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
  _mi_heap_delayed_free_all(heap);

  // collect retired pages
  _mi_heap_collect_retired(heap, force);

  // free pages that were delayed with QSBR
  _PyMem_mi_heap_collect_qsbr(heap);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t, &heap->thread_delayed_free) == NULL );

  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // collect segment local caches
  if (force) {
    _mi_segment_thread_collect(&heap->tld->segments);
  }

  // collect regions on program-exit (or shared library unload)
  if (force && is_main_thread && mi_heap_is_backing(heap)) {
    _mi_thread_data_collect();  // collect thread data cache
    _mi_arena_collect(true /* force purge */, &heap->tld->stats);
  }
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {
  mi_heap_collect_ex(heap, MI_ABANDON);
}

void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
}

void mi_collect(bool force) mi_attr_noexcept {
  mi_heap_collect(mi_prim_get_default_heap(), force);
}
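
// Note: `mi_collect` collects the calling thread's default heap. A forced
// collect (`mi_collect(true)`) also purges caches and can be relatively
// expensive, so it is best reserved for quiescent points (e.g. program exit).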


/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_prim_get_default_heap();
}

static bool mi_heap_is_default(const mi_heap_t* heap) {
  return (heap == mi_prim_get_default_heap());
}


mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}

void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag)
{
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = tld;
  heap->thread_id = _mi_thread_id();
  heap->arena_id = arena_id;
  if (heap == tld->heap_backing) {
    _mi_random_init(&heap->random);
  }
  else {
    _mi_random_split(&tld->heap_backing->random, &heap->random);
  }
  heap->cookie = _mi_heap_random_next(heap) | 1;
  heap->keys[0] = _mi_heap_random_next(heap);
  heap->keys[1] = _mi_heap_random_next(heap);
  heap->no_reclaim = no_reclaim;
  heap->tag = tag;
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
  if (heap == NULL) return NULL;
  // don't reclaim abandoned pages: otherwise `mi_heap_destroy` would be unsafe
  _mi_heap_init_ex(heap, bheap->tld, arena_id, true, 0);
  return heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  return mi_heap_new_in_arena(_mi_arena_id_none());
}
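
// Example (sketch, not part of this file's API surface): a fresh heap can be
// used for scoped allocation; `mi_heap_delete` migrates any blocks that are
// still live to the backing heap, so they remain valid afterwards:
//
//   mi_heap_t* h = mi_heap_new();
//   void* p = mi_heap_malloc(h, 128);
//   ...
//   mi_heap_delete(h);   // `p` stays valid; it is now owned by the backing heap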

bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}

uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
  return _mi_random_next(&heap->random);
}

// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (mi_heap_is_backing(heap)) return; // don't free the backing heap

  // reset default
  if (mi_heap_is_default(heap)) {
    _mi_heap_set_default_direct(heap->tld->heap_backing);
  }

  // remove ourselves from the thread local heaps list
  // linear search but we expect the number of heaps to be relatively small
  mi_heap_t* prev = NULL;
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != heap && curr != NULL) {
    prev = curr;
    curr = curr->next;
  }
  mi_assert_internal(curr == heap);
  if (curr == heap) {
    if (prev != NULL) { prev->next = heap->next; }
    else { heap->tld->heaps = heap->next; }
  }
  mi_assert_internal(heap->tld->heaps != NULL);

  // and free the used memory
  mi_free(heap);
}


/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */

static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_decrease(heap, large, bsize);
    }
    else {
      mi_heap_stat_decrease(heap, huge, bsize);
    }
  }
  #if (MI_STAT)
  _mi_page_free_collect(page, false);  // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse);  // todo: off for aligned blocks...
  #endif

  // pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
  page->used = 0;

  // and free the page
  // mi_page_free(page,false);
  page->next = NULL;
  page->prev = NULL;
  _mi_segment_page_free(page, false /* no force? */, &heap->tld->segments);

  return true; // keep going
}


void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}

#if MI_TRACK_HEAP_DESTROY
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
  mi_track_free_size(block, mi_usable_size(block));
  return true;
}
#endif

void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }
  else {
    // track all blocks as freed
    #if MI_TRACK_HEAP_DESTROY
    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
    #endif
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}

// forcefully destroy all heaps in the current thread
void _mi_heap_unsafe_destroy_all(void) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* curr = bheap->tld->heaps;
  while (curr != NULL) {
    mi_heap_t* next = curr->next;
    if (curr->no_reclaim) {
      mi_heap_destroy(curr);
    }
    else {
      _mi_heap_destroy_pages(curr);
    }
    curr = next;
  }
}

/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free_partial(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
  // so after this only the new heap will get delayed frees
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer points to `from`;
  // this turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
  // the regular `_mi_free_delayed_block` which is safe.
  _mi_heap_delayed_free_all(from);
  #if !defined(_MSC_VER) || (_MSC_VER > 1900)  // somehow the following line gives an error in VS2015, issue #353
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t, &from->thread_delayed_free) == NULL);
  #endif

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

// Safe delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}

mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_prim_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}




/* -----------------------------------------------------------
  Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(valid);
  if mi_unlikely(!valid) return NULL;
  return mi_page_heap(_mi_segment_page_of(segment, p));
}

bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}


static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  bool* found = (bool*)vfound;
  mi_segment_t* segment = _mi_page_segment(page);
  void* start = _mi_page_start(segment, page, NULL);
  void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
  *found = (p >= start && p < end);
  return (!*found);  // continue if not found
}

bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}

bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}

/* -----------------------------------------------------------
  Visit all heap blocks and areas
  Todo: enable visiting abandoned pages, and
        enable visiting all blocks of all heaps across threads
----------------------------------------------------------- */

// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;
  mi_page_t* page;
} mi_heap_area_ex_t;

static void mi_fast_divisor(size_t divisor, size_t* magic, size_t* shift) {
  mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
  *shift = MI_INTPTR_BITS - mi_clz(divisor - 1);
  *magic = (size_t)(((1ULL << 32) * ((1ULL << *shift) - divisor)) / divisor + 1);
}

static size_t mi_fast_divide(size_t n, size_t magic, size_t shift) {
  mi_assert_internal(n <= UINT32_MAX);
  return ((((uint64_t)n * magic) >> 32) + n) >> shift;
}
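
// How the fast division above works (a sketch of the standard multiply-and-shift
// reciprocal technique): for a divisor `d`, with `shift = ceil(log2(d))` and
// `magic = floor(2^32 * (2^shift - d) / d) + 1`, the expression
// `(((n * magic) >> 32) + n) >> shift` equals `n / d` for 32-bit `n`
// (the debug assertion in the caller checks this against a real division).
// For example, with `d = 48`: `shift = 6`, `magic = 1431655766`, and for
// `n = 96` we get `((96 * 1431655766) >> 32) + 96 = 32 + 96 = 128`,
// and `128 >> 6 = 2 = 96 / 48`.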

bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(area != NULL);
  if (area==NULL) return true;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);  // without padding
  size_t psize;
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
  mi_heap_t* heap = mi_page_heap(page);

  if (page->capacity == 1) {
    // optimize page with one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(heap, area, pstart, ubsize, arg);
  }

  if (page->used == page->capacity) {
    // optimize full pages
    uint8_t* block = pstart;
    for (size_t i = 0; i < page->capacity; i++) {
      if (!visitor(heap, area, block, ubsize, arg)) return false;
      block += bsize;
    }
    return true;
  }

  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
  size_t bmapsize = (page->capacity + MI_INTPTR_BITS - 1) / MI_INTPTR_BITS;
  memset(free_map, 0, bmapsize * sizeof(uintptr_t));

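  // mark the bits beyond `capacity` in the last bitmap word as "free" so the
  // walk below never visits blocks past the end of the page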
  if (page->capacity % MI_INTPTR_BITS != 0) {
    size_t shift = (page->capacity % MI_INTPTR_BITS);
    uintptr_t mask = (UINTPTR_MAX << shift);
    free_map[bmapsize - 1] = mask;
  }

  // fast repeated division by the block size
  size_t magic, shift;
  mi_fast_divisor(bsize, &magic, &shift);

  #if MI_DEBUG>1
  size_t free_count = 0;
  #endif
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
    #if MI_DEBUG>1
    free_count++;
    #endif
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % bsize == 0);
    size_t blockidx = mi_fast_divide(offset, magic, shift);
    mi_assert_internal(blockidx == offset / bsize);
    mi_assert_internal(blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / MI_INTPTR_BITS);
    size_t bit = blockidx - (bitidx * MI_INTPTR_BITS);
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  #if MI_DEBUG>1
  size_t used_count = 0;
  #endif
  uint8_t* block = pstart;
  for (size_t i = 0; i < bmapsize; i++) {
    if (free_map[i] == 0) {
      // every block is in use
      for (size_t j = 0; j < MI_INTPTR_BITS; j++) {
        #if MI_DEBUG>1
        used_count++;
        #endif
        if (!visitor(heap, area, block, ubsize, arg)) return false;
        block += bsize;
      }
    }
    else {
      uintptr_t m = ~free_map[i];
      while (m) {
        #if MI_DEBUG>1
        used_count++;
        #endif
        size_t bitidx = mi_ctz(m);
        if (!visitor(heap, area, block + (bitidx * bsize), ubsize, arg)) return false;
        m &= m - 1;
      }
      block += bsize * MI_INTPTR_BITS;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}

typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);

void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
  _mi_page_free_collect(page, true);
  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);
  area->reserved = page->reserved * bsize;
  area->committed = page->capacity * bsize;
  area->blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
  area->used = page->used;  // number of blocks in use (#553)
  area->block_size = ubsize;
  area->full_block_size = bsize;
}

static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  xarea.page = page;
  _mi_heap_area_init(&xarea.area, page);
  return fun(heap, &xarea, arg);
}

// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg);  // note: function pointer to void* :-{
}

// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool visit_blocks;
  mi_block_visit_fun* visitor;
  void* arg;
} mi_visit_blocks_args_t;

static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return _mi_heap_area_visit_blocks(&xarea->area, xarea->page, args->visitor, args->arg);
  }
  else {
    return true;
  }
}

// Visit all blocks in a heap
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  _mi_heap_delayed_free_partial((mi_heap_t*)heap);
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
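
// Example (sketch): counting the live blocks in the current default heap with
// the visitor API above. The visitor is first called once per area with
// `block == NULL`, and then once per live block when `visit_blocks` is true;
// `count_blocks` is a hypothetical user callback:
//
//   static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
//                            void* block, size_t block_size, void* arg) {
//     MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(block_size);
//     if (block != NULL) { (*(size_t*)arg)++; }
//     return true;  // keep visiting
//   }
//
//   size_t count = 0;
//   mi_heap_visit_blocks(mi_heap_get_default(), true /* visit_blocks */, &count_blocks, &count);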