/*----------------------------------------------------------------------------
Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
#endif

/* -----------------------------------------------------------
  Minimal alignment in machine words (i.e. `sizeof(void*)`)
----------------------------------------------------------- */

#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
  #error "define alignment for more than 4x word size for this platform"
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
  #define MI_ALIGN4W   // 4 machine words minimal alignment
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
  #define MI_ALIGN2W   // 2 machine words minimal alignment
#else
  // ok, default alignment is 1 word
#endif


/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */

static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
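
// Note: the huge and full queues are identified by sentinel block sizes just
// above MI_MEDIUM_OBJ_SIZE_MAX (one and two extra words respectively); these
// are not real block sizes but mark the two special queues at the end of the
// `heap->pages` array.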

/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */

// Return the bin for a given block size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline uint8_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  uint8_t bin;
  if (wsize <= 1) {
    bin = 1;
  }
  #if defined(MI_ALIGN4W)
  else if (wsize <= 4) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #elif defined(MI_ALIGN2W)
  else if (wsize <= 8) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #else
  else if (wsize <= 8) {
    bin = (uint8_t)wsize;
  }
  #endif
  else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
    bin = MI_BIN_HUGE;
  }
  else {
    #if defined(MI_ALIGN4W)
    if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
    #endif
    wsize--;
    // find the highest bit
    uint8_t b = (uint8_t)mi_bsr(wsize);  // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
    // - adjust with 3 because we do not round the first 8 sizes
    //   which each get an exact bin
    bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
    mi_assert_internal(bin < MI_BIN_HUGE);
  }
  mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE);
  return bin;
}
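
// A worked example of the mapping (assuming a 64-bit platform, so
// sizeof(void*) == 8, with default 1-word minimal alignment):
//   mi_bin(196): wsize = 25; wsize-- gives 24 (binary 11000), so
//   b = mi_bsr(24) = 4 and bin = (4 << 2) + ((24 >> 2) & 0x03) - 3
//                             = 16 + 2 - 3 = 15.
// All sizes with wsize 25..28 (193..224 bytes) land in bin 15, whose block
// size is 28 words (224 bytes), illustrating the rounding behind the ~12.5%
// worst-case internal fragmentation noted above.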


/* -----------------------------------------------------------
  Queue of pages with free blocks
----------------------------------------------------------- */

uint8_t _mi_bin(size_t size) {
  return mi_bin(size);
}

size_t _mi_bin_size(uint8_t bin) {
  return _mi_heap_empty.pages[bin].block_size;
}

// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    return _mi_bin_size(mi_bin(size));
  }
  else {
    return _mi_align_up(size,_mi_os_page_size());
  }
}
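
// A usage sketch (hypothetical caller code): growable buffers can round their
// requested capacity up to the bin size so the slack is usable rather than
// wasted. On a 64-bit platform with default alignment, for example:
//
//   size_t cap = mi_good_size(196);     // rounds up to 224 (bin 15)
//   char*  buf = (char*)mi_malloc(cap); // the allocation fills its bin exactly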

#if (MI_DEBUG>1)
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_page_t* list = queue->first;
  while (list != NULL) {
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    if (list == page) break;
    list = list->next;
  }
  return (list == page);
}
#endif

#if (MI_DEBUG>1)
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) {
  return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
}
#endif

static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_heap_t* heap = mi_page_heap(page);
  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size);
  mi_assert_expensive(mi_page_queue_contains(pq, page));
  return pq;
}

static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_assert_internal(bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
  return pq;
}

// The direct page array (`heap->pages_free_direct`) is for efficiency: for
// each small size (up to `MI_SMALL_SIZE_MAX`) it points directly to the page
// for that size without having to compute the bin. This means that when the
// current free page queue is updated for a small bin, we need to update a
// range of entries in `heap->pages_free_direct`.
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  size_t size = pq->block_size;
  if (size > MI_SMALL_SIZE_MAX) return;

  mi_page_t* page = pq->first;
  if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty;

  // find index in the right direct page array
  size_t start;
  size_t idx = _mi_wsize_from_size(size);
  mi_page_t** pages_free = heap->pages_free_direct;

  if (pages_free[idx] == page) return;  // already set

  // find start slot
  if (idx<=1) {
    start = 0;
  }
  else {
    // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
    uint8_t bin = mi_bin(size);
    const mi_page_queue_t* prev = pq - 1;
    while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
      prev--;
    }
    start = 1 + _mi_wsize_from_size(prev->block_size);
    if (start > idx) start = idx;
  }

  // set size range to the right page
  mi_assert(start <= idx);
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free[sz] = page;
  }
}
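
// A worked example (assuming 64-bit and 1-word minimal alignment): for the
// queue with block size 80 bytes (wsize 10) the previous queue has block size
// 64 bytes (wsize 8), so `start` = 9 and `idx` = 10, and entries 9..10 of
// `pages_free_direct` are set to the queue's first page. A request of 72 or
// 80 bytes then finds that page directly, without computing its bin.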

/*
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
  return (queue->first == NULL);
}
*/

static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(queue, page));
  mi_assert_internal(page->xblock_size == queue->block_size ||
                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
  mi_heap_t* heap = mi_page_heap(page);

  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == queue->last)  queue->last = page->prev;
  if (page == queue->first) {
    queue->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, queue));
    mi_heap_queue_first_update(heap,queue);
  }
  heap->page_count--;
  page->next = NULL;
  page->prev = NULL;
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
  mi_page_set_in_full(page,false);
}


static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(!mi_page_queue_contains(queue, page));
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  #endif
  mi_assert_internal(page->xblock_size == queue->block_size ||
                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) {
    mi_assert_internal(queue->first->prev == NULL);
    queue->first->prev = page;
    queue->first = page;
  }
  else {
    queue->first = queue->last = page;
  }

  // update direct
  mi_heap_queue_first_update(heap, queue);
  heap->page_count++;
}


static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(from, page));
  mi_assert_expensive(!mi_page_queue_contains(to, page));

  mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
                     (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
                     (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));

  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == from->last)  from->last = page->prev;
  if (page == from->first) {
    from->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, from));
    mi_heap_queue_first_update(heap, from);
  }

  page->prev = to->last;
  page->next = NULL;
  if (to->last != NULL) {
    mi_assert_internal(heap == mi_page_heap(to->last));
    to->last->next = page;
    to->last = page;
  }
  else {
    to->first = page;
    to->last = page;
    mi_heap_queue_first_update(heap, to);
  }

  mi_page_set_in_full(page, mi_page_queue_is_full(to));
}
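
// Note the asymmetry with `mi_page_queue_push`: a push inserts at the head of
// the queue while `mi_page_queue_enqueue_from` appends at the tail, so a page
// moved between queues is only tried after the pages already in its new queue.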

// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  mi_assert_internal(pq->block_size == append->block_size);

  if (append->first==NULL) return 0;

  // set append pages to new heap and count
  size_t count = 0;
  for (mi_page_t* page = append->first; page != NULL; page = page->next) {
    // inline `mi_page_set_heap` to avoid a wrong assertion during absorption;
    // in this case it is ok for pages to be in the delayed-free state since
    // both the "to" and "from" heap are still alive.
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
    // side effect that it spins until any DELAYED_FREEING is finished. This ensures
    // that after appending only the new heap will be used for delayed free operations.
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
    count++;
  }

  if (pq->last==NULL) {
    // take over afresh
    mi_assert_internal(pq->first==NULL);
    pq->first = append->first;
    pq->last = append->last;
    mi_heap_queue_first_update(heap, pq);
  }
  else {
    // append to end
    mi_assert_internal(pq->last!=NULL);
    mi_assert_internal(append->first!=NULL);
    pq->last->next = append->first;
    append->first->prev = pq->last;
    pq->last = append->last;
  }
  return count;
}
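
// A minimal sketch of the single call site, assuming `mi_heap_absorb` (in
// heap.c) walks every bin of the heap being absorbed and appends each queue
// into the corresponding queue of the absorbing heap; the exact code may
// differ:
//
//   for (size_t i = 0; i <= MI_BIN_FULL; i++) {
//     mi_page_queue_t* pq     = &heap->pages[i];
//     mi_page_queue_t* append = &from->pages[i];
//     size_t pcount = _mi_page_queue_append(heap, pq, append);
//     heap->page_count += pcount;
//     from->page_count -= pcount;
//   }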
333