Lines Matching full:page
19 Definition of page queues for each block size
23 #include "page-queue.c"
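For context, a page queue is a doubly-linked list of pages that all serve one block size; the type behind `page-queue.c` (from mimalloc's `types.h`) is essentially:

  typedef struct mi_page_queue_s {
    mi_page_t* first;       // first page with this block size
    mi_page_t* last;        // last page (used for appending)
    size_t     block_size;  // the size class this queue serves
  } mi_page_queue_t;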
28 Page helpers
31 // Index a block in a page
32 static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) {
33   MI_UNUSED(page);
34   mi_assert_internal(page != NULL);
35   mi_assert_internal(i <= page->reserved);
39 static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
40 static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld);
43 static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
46     mi_assert_internal(page == _mi_ptr_page(head));
48     head = mi_block_next(page, head);
54 // Start of the page available memory
55 static inline uint8_t* mi_page_area(const mi_page_t* page) {
56   return _mi_page_start(_mi_page_segment(page), page, NULL);
60 static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
62   uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize);
67     p = mi_block_next(page, p);
70   if (page->free_is_zero) {
71     const size_t ubsize = mi_page_usable_block_size(page);
72     for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
80 static bool mi_page_is_valid_init(mi_page_t* page) {
81   mi_assert_internal(page->xblock_size > 0);
82   mi_assert_internal(page->used <= page->capacity);
83   mi_assert_internal(page->capacity <= page->reserved);
85   mi_segment_t* segment = _mi_page_segment(page);
86   uint8_t* start = _mi_page_start(segment,page,NULL);
87   mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
88   //const size_t bsize = mi_page_block_size(page);
89   //mi_assert_internal(start + page->capacity*page->block_size == page->top);
91   mi_assert_internal(mi_page_list_is_valid(page,page->free));
92   mi_assert_internal(mi_page_list_is_valid(page,page->local_free));
95   if (page->free_is_zero) {
96     const size_t ubsize = mi_page_usable_block_size(page);
97     for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
104   mi_block_t* tfree = mi_page_thread_free(page);
105   mi_assert_internal(mi_page_list_is_valid(page, tfree));
106   //size_t tfree_count = mi_page_list_count(page, tfree);
107   //mi_assert_internal(tfree_count <= page->thread_freed + 1);
110   size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
111   mi_assert_internal(page->used + free_count == page->capacity);
118 bool _mi_page_is_valid(mi_page_t* page) {
119   mi_assert_internal(mi_page_is_valid_init(page));
121   mi_assert_internal(page->keys[0] != 0);
123   if (mi_page_heap(page)!=NULL) {
124     mi_segment_t* segment = _mi_page_segment(page);
126     mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
131     mi_page_queue_t* pq = mi_page_queue_of(page);
132     mi_assert_internal(mi_page_queue_contains(pq, page));
133     mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
134     mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
141 void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
142   while (!_mi_page_try_use_delayed_free(page, delay, override_never)) {
147 bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
153     tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
169            !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
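Both loops above work on `page->xthread_free`, which packs the head of the thread-free list together with a 2-bit `mi_delayed_t` state in the low bits of the pointer. A simplified sketch of the helpers involved (the names are the real ones from mimalloc's `internal.h`):

  typedef uintptr_t mi_thread_free_t;  // block pointer | 2-bit mi_delayed_t

  static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
    return (mi_delayed_t)(tf & 0x03);               // low bits: delayed-free state
  }
  static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
    return (mi_block_t*)(tf & ~(uintptr_t)0x03);    // remaining bits: list head
  }
  static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
    return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
  }
  static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
    return mi_tf_make(mi_tf_block(tf), delayed);    // keep the list, change the state
  }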
175 Page collect the `local_free` and `thread_free` lists
181 // ensure that there was no race where the page became unfull just before the move.
182 static void _mi_page_thread_free_collect(mi_page_t* page)
186   mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
190   } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex));
196   uint32_t max_count = page->capacity; // cannot collect more than capacity
200   while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
211   mi_block_set_next(page,tail, page->local_free);
212   page->local_free = head;
215   page->used -= count;
218 void _mi_page_free_collect(mi_page_t* page, bool force) {
219   mi_assert_internal(page!=NULL);
222   if (force || mi_page_thread_free(page) != NULL) {  // quick test to avoid an atomic operation
223     _mi_page_thread_free_collect(page);
227   if (page->local_free != NULL) {
228     // any previous QSBR goals are no longer valid because we reused the page
229     _PyMem_mi_page_clear_qsbr(page);
231     if mi_likely(page->free == NULL) {
233       page->free = page->local_free;
234       page->local_free = NULL;
235       page->free_is_zero = false;
239       mi_block_t* tail = page->local_free;
241       while ((next = mi_block_next(page, tail)) != NULL) {
244       mi_block_set_next(page, tail, page->free);
245       page->free = page->local_free;
246       page->local_free = NULL;
247       page->free_is_zero = false;
251   mi_assert_internal(!force || page->local_free == NULL);
257 Page fresh and retire
261 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
262   mi_assert_expensive(mi_page_is_valid_init(page));
264   mi_assert_internal(mi_page_heap(page) == heap);
265   mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
267   mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
271   mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
272   mi_page_queue_push(heap, pq, page);
273   _PyMem_mi_page_reclaimed(page);
274   mi_assert_expensive(_mi_page_is_valid(page));
277 // allocate a fresh page from a segment
284   mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
285   if (page == NULL) {
286     // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
289   mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
290   mi_assert_internal(pq!=NULL || page->xblock_size != 0);
291   mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
292   // a fresh page was found, initialize it
293   const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
295   mi_page_init(heap, page, full_block_size, heap->tld);
297   if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
298   mi_assert_expensive(_mi_page_is_valid(page));
299   return page;
302 // Get a fresh page to use
305   mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0);
306   if (page==NULL) return NULL;
307   mi_assert_internal(pq->block_size==mi_page_block_size(page));
308   mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));
309   return page;
314 (put there by other threads if they deallocated in a full page)
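The drain of that heap-level `thread_delayed_free` list happens in `_mi_heap_delayed_free_partial`, whose lines fall in the gap above (they don't contain the search term). A simplified sketch of its shape:

  // detach the whole list atomically (other threads may push concurrently)
  mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
  while (block != NULL &&
         !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) {
    /* retry the CAS */
  }
  // then free the detached blocks one by one
  while (block != NULL) {
    mi_block_t* next = mi_block_nextx(heap, block, heap->keys);  // next is encoded with the heap keys
    _mi_free_delayed_block(block);  // (the real code re-queues the block if its page is still MI_DELAYED_FREEING)
    block = next;
  }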
352 // Move a page from the full list back to a regular list
353 void _mi_page_unfull(mi_page_t* page) {
354   mi_assert_internal(page != NULL);
355   mi_assert_expensive(_mi_page_is_valid(page));
356   mi_assert_internal(mi_page_is_in_full(page));
357   if (!mi_page_is_in_full(page)) return;
359   mi_heap_t* heap = mi_page_heap(page);
361   mi_page_set_in_full(page, false); // to get the right queue
362   mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
363   mi_page_set_in_full(page, true);
364   mi_page_queue_enqueue_from(pq, pqfull, page);
367 static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
368   mi_assert_internal(pq == mi_page_queue_of(page));
369   mi_assert_internal(!mi_page_immediate_available(page));
370   mi_assert_internal(!mi_page_is_in_full(page));
372   if (mi_page_is_in_full(page)) return;
373   mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
374   _mi_page_free_collect(page,false);  // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
378 // Abandon a page with used blocks at the end of a thread.
380 // the `page->heap->thread_delayed_free` into this page.
382 void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
383   mi_assert_internal(page != NULL);
384   mi_assert_expensive(_mi_page_is_valid(page));
385   mi_assert_internal(pq == mi_page_queue_of(page));
386   mi_assert_internal(mi_page_heap(page) != NULL);
388   mi_heap_t* pheap = mi_page_heap(page);
391   if (page->qsbr_node.next != NULL) {
393     llist_remove(&page->qsbr_node);
397   // remove from our page list
399   mi_page_queue_remove(pq, page);
401   // page is no longer associated with our heap
402   mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
403   mi_page_set_heap(page, NULL);
408     mi_assert_internal(_mi_ptr_page(block) != page);
413   mi_assert_internal(mi_page_heap(page) == NULL);
414   _mi_segment_page_abandon(page,segments_tld);
418 // Free a page with no more free blocks
419 void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
420   mi_assert_internal(page != NULL);
421   mi_assert_expensive(_mi_page_is_valid(page));
422   mi_assert_internal(pq == mi_page_queue_of(page));
423   mi_assert_internal(mi_page_all_free(page));
424   mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING);
427   mi_page_set_has_aligned(page, false);
429   mi_heap_t* heap = mi_page_heap(page);
432   mi_assert_internal(page->qsbr_goal == 0);
433   mi_assert_internal(page->qsbr_node.next == NULL);
436   // remove from the page list
439   mi_page_queue_remove(pq, page);
442   mi_page_set_heap(page,NULL);
443   _mi_segment_page_free(page, force, segments_tld);
450 // Retire a page with no more used blocks
456 void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
457   mi_assert_internal(page != NULL);
458   mi_assert_expensive(_mi_page_is_valid(page));
459   mi_assert_internal(mi_page_all_free(page));
461   mi_page_set_has_aligned(page, false);
463   // any previous QSBR goals are no longer valid because we reused the page
464   _PyMem_mi_page_clear_qsbr(page);
469   // is the only page left with free blocks. It is not clear
471   // for now, we don't retire if it is the only page left of this size class.
472   mi_page_queue_t* pq = mi_page_queue_of(page);
473   if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) {  // not too large && not full or huge queue?
474     if (pq->last==page && pq->first==page) { // the only page in the queue?
476       page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
477       mi_heap_t* heap = mi_page_heap(page);
483   mi_assert_internal(mi_page_all_free(page));
487   _PyMem_mi_page_maybe_free(page, pq, false);
497   mi_page_t* page = pq->first;
498   if (page != NULL && page->retire_expire != 0) {
499     if (mi_page_all_free(page)) {
500       page->retire_expire--;
501       if (force || page->retire_expire == 0) {
503         mi_assert_internal(page->qsbr_goal == 0);
505         _PyMem_mi_page_maybe_free(page, pq, force);
514       page->retire_expire = 0;
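Why count down instead of freeing an all-free page immediately? A hypothetical caller pattern (illustrative only, not from the source) shows the thrashing this avoids:

  for (int i = 0; i < N; i++) {
    void* p = mi_malloc(64);   // the only live block on its page
    mi_free(p);                // page becomes all-free; without the delay its
  }                            // memory would go back to the segment right away
  // with retire_expire, the all-free page survives a few collection ticks
  // (_mi_heap_collect_retired) and is simply reused by the next mi_malloc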
524 Initialize the initial free list in a page.
533 static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
536   mi_assert_internal(page->free == NULL);
537   mi_assert_internal(page->local_free == NULL);
539   mi_assert_internal(page->capacity + extend <= page->reserved);
540   mi_assert_internal(bsize == mi_page_block_size(page));
541   void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
555     blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend);
582     mi_block_set_next(page, block, blocks[next]);   // and set next; note: we may have `current == next`
586   mi_block_set_next(page, blocks[current], page->free);  // end of the list
587   page->free = free_start;
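Note that `mi_block_set_next` above does not write a raw pointer in secure builds: with `MI_ENCODE_FREELIST` enabled, the next-field is encoded with the per-page `keys` so that corrupted or guessed free-list entries are detected. Conceptually it is something like the following (a simplified sketch, not mimalloc's exact formula, which also rotates by a key-derived amount; see `mi_ptr_encode` in `internal.h`):

  // sketch only: illustrates why page->keys must be initialized before use
  block->next = (mi_encoded_t)(((uintptr_t)next ^ page->keys[1]) + page->keys[0]);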
590 static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
594   mi_assert_internal(page->free == NULL);
595   mi_assert_internal(page->local_free == NULL);
597   mi_assert_internal(page->capacity + extend <= page->reserved);
598   mi_assert_internal(bsize == mi_page_block_size(page));
599   void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL );
601   mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity);
604   mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1);
608     mi_block_set_next(page,block,next);
612   mi_block_set_next(page, last, page->free);
613   page->free = start;
617 Page initialize and extend the capacity
620 #define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well.
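A worked example of this heuristic as applied in `mi_page_extend_free` below: the number of blocks threaded per call is capped at roughly `MI_MAX_EXTEND_SIZE / bsize`, with a small lower bound `MI_MIN_EXTEND` (defined in the elided lines), so:

  // bsize = 64    -> at most 4096/64 = 64 new blocks threaded per call
  // bsize = 8192  -> cap falls below the minimum, so MI_MIN_EXTEND blocks are used
  // net effect: the free list grows lazily, touching ~one OS page at a time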
632 static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
634   mi_assert_expensive(mi_page_is_valid_init(page));
636   mi_assert(page->free == NULL);
637   mi_assert(page->local_free == NULL);
638   if (page->free != NULL) return;
640   if (page->capacity >= page->reserved) return;
643   _mi_page_start(_mi_page_segment(page), page, &page_size);
647   const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size);
648   size_t extend = page->reserved - page->capacity;
656   // ensure we don't touch memory beyond the page to reduce page commit.
661   mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
666     mi_page_free_list_extend(page, bsize, extend, &tld->stats );
669     mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats);
672   page->capacity += (uint16_t)extend;
674   mi_assert_expensive(mi_page_is_valid_init(page));
677 // Initialize a fresh page
678 static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) {
679   mi_assert(page != NULL);
680   mi_segment_t* segment = _mi_page_segment(page);
684   mi_page_set_heap(page, heap);
685   page->tag = heap->tag;
686   page->use_qsbr = heap->page_use_qsbr;
687   page->debug_offset = heap->debug_offset;
688   page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
690   const void* page_start = _mi_segment_page_start(segment, page, &page_size);
693   mi_assert_internal(mi_page_block_size(page) <= page_size);
694   mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
696   page->reserved = (uint16_t)(page_size / block_size);
697   mi_assert_internal(page->reserved > 0);
699   page->keys[0] = _mi_heap_random_next(heap);
700   page->keys[1] = _mi_heap_random_next(heap);
702   page->free_is_zero = page->is_zero_init;
704   if (page->is_zero_init) {
710   mi_assert_internal(page->is_committed);
711   mi_assert_internal(page->capacity == 0);
712   mi_assert_internal(page->free == NULL);
713   mi_assert_internal(page->used == 0);
714   mi_assert_internal(page->xthread_free == 0);
715   mi_assert_internal(page->next == NULL);
716   mi_assert_internal(page->prev == NULL);
718   mi_assert_internal(page->qsbr_goal == 0);
719   mi_assert_internal(page->qsbr_node.next == NULL);
721   mi_assert_internal(page->retire_expire == 0);
722   mi_assert_internal(!mi_page_has_aligned(page));
724   mi_assert_internal(page->keys[0] != 0);
725   mi_assert_internal(page->keys[1] != 0);
727   mi_assert_expensive(mi_page_is_valid_init(page));
730   mi_page_extend_free(heap,page,tld);
731   mi_assert(mi_page_immediate_available(page));
739 // Find a page with free blocks of `page->block_size`.
746   mi_page_t* page = pq->first;
747   while (page != NULL)
749     mi_page_t* next = page->next; // remember next
755     _mi_page_free_collect(page, false);
757     // 1. if the page contains free blocks, we are done
758     if (mi_page_immediate_available(page)) {
763     if (page->capacity < page->reserved) {
764       mi_page_extend_free(heap, page, heap->tld);
765       mi_assert_internal(mi_page_immediate_available(page));
769     // 3. If the page is completely full, move it to the `mi_pages_full`
771     mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
772     mi_page_to_full(page, pq);
774     page = next;
775   } // for each page
779   if (page == NULL) {
781     _mi_heap_collect_retired(heap, false); // perhaps make a page available?
782     page = mi_page_fresh(heap, pq);
783     if (page == NULL && first_try) {
784       // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
785       page = mi_page_queue_find_free_ex(heap, pq, false);
789     mi_assert(pq->first == page);
790     page->retire_expire = 0;
791     _PyMem_mi_page_clear_qsbr(page);
793   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
794   return page;
799 // Find a page with free blocks of `size`.
802   mi_page_t* page = pq->first;
803   if (page != NULL) {
805     if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
806       mi_page_extend_free(heap, page, heap->tld);
807       mi_assert_internal(mi_page_immediate_available(page));
812     _mi_page_free_collect(page,false);
815     if (mi_page_immediate_available(page)) {
816       page->retire_expire = 0;
817       _PyMem_mi_page_clear_qsbr(page);
818       return page; // fast path
854 // Large and huge page allocation.
857 // just that page, we always treat them as abandoned and any thread
858 // that frees the block can free the whole page and segment directly.
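The elided start of `mi_large_huge_page_alloc` picks the queue based on size; in the abandon-at-birth configuration described above, it is roughly (a sketch mirroring the shape of the upstream mimalloc code, not a verbatim quote):

  const size_t block_size = _mi_os_good_alloc_size(size);
  const bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
  // huge pages go to no queue at all (pq == NULL): they are abandoned at birth
  mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));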
870   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
871   if (page != NULL) {
872     mi_assert_internal(mi_page_immediate_available(page));
875     mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
876     mi_assert_internal(_mi_page_segment(page)->used==1);
878     mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
879     mi_page_set_heap(page, NULL);
883     mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
886     const size_t bsize = mi_page_usable_block_size(page);  // note: not `mi_page_block_size` to account for padding
896   return page;
900 // Allocate a page
916   // otherwise find a page with free blocks in our size segregated queues
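For context, the dispatch in `mi_find_page` around this line is roughly (an abridged sketch of the upstream mimalloc code; `req_size` is `size` adjusted for padding):

  if mi_unlikely(req_size > MI_MEDIUM_OBJ_SIZE_MAX || huge_alignment > 0) {
    return mi_large_huge_page_alloc(heap, size, huge_alignment);  // large/huge path above
  }
  else {
    return mi_find_free_page(heap, size);  // size-segregated queues
  }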
945   // find (or allocate) a page of the right size
946   mi_page_t* page = mi_find_page(heap, size, huge_alignment);
947   if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
949     page = mi_find_page(heap, size, huge_alignment);
952   if mi_unlikely(page == NULL) { // out of memory
958   mi_assert_internal(mi_page_immediate_available(page));
959   mi_assert_internal(mi_page_block_size(page) >= size);
962   if mi_unlikely(zero && page->xblock_size == 0) {
964     void* p = _mi_page_malloc(heap, page, size, false);
966     _mi_memzero_aligned(p, mi_page_usable_block_size(page));
970   return _mi_page_malloc(heap, page, size, zero);
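Finally, for context on how execution reaches `_mi_malloc_generic` at all: the allocation fast path in `alloc.c` pops from `page->free` and only falls back here when that list is empty. An abridged sketch (the real `_mi_page_malloc` also handles zeroing, padding, and stats):

  void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) {
    mi_block_t* const block = page->free;
    if mi_unlikely(block == NULL) {
      return _mi_malloc_generic(heap, size, zero, 0);  // slow path: collect, extend, or allocate a page
    }
    page->free = mi_block_next(page, block);  // pop the head of the free list
    page->used++;
    return (void*)block;
  }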