1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 
8 /* ----------------------------------------------------------------------------
9 "Arenas" are fixed area's of OS memory from which we can allocate
10 large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
11 In contrast to the rest of mimalloc, the arenas are shared between
12 threads and need to be accessed using atomic operations.
13 
14 Arenas are used for huge OS page (1GiB) reservations or for reserving
15 OS memory upfront which can improve performance or is sometimes needed
16 on embedded devices. We can also employ this with WASI or `sbrk` systems
17 to reserve large arenas upfront and be able to reuse the memory more effectively.
18 
19 The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
20 -----------------------------------------------------------------------------*/
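/* -----------------------------------------------------------------------------
  Illustrative usage (a sketch, not part of the allocator): an application that
  wants to reserve arena memory upfront can use the public API defined later in
  this file. The 1 GiB size and the use of `mi_heap_new_in_arena` (declared in
  `mimalloc.h`) are assumptions chosen for the example only.

    mi_arena_id_t arena_id;
    // reserve 1 GiB as an exclusive arena: commit eagerly, allow large OS pages
    if (mi_reserve_os_memory_ex((size_t)1 << 30, true, true, true, &arena_id) == 0) {
      mi_heap_t* heap = mi_heap_new_in_arena(arena_id);  // heap that allocates only from this arena
      void* p = mi_heap_malloc(heap, 16 * 1024 * 1024);  // served from the reserved arena
      mi_free(p);
    }
-----------------------------------------------------------------------------*/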
21 #include "mimalloc.h"
22 #include "mimalloc/internal.h"
23 #include "mimalloc/atomic.h"
24 
25 #include <string.h>  // memset
26 #include <errno.h>   // ENOMEM
27 
28 #include "bitmap.h"  // atomic bitmap
29 
30 /* -----------------------------------------------------------
31   Arena allocation
32 ----------------------------------------------------------- */
33 
34 // Block info: bit 0 contains the `in_use` bit, the upper bits the
35 // size in count of arena blocks.
36 typedef uintptr_t mi_block_info_t;
37 #define MI_ARENA_BLOCK_SIZE   (MI_SEGMENT_SIZE)        // 64MiB  (must be at least MI_SEGMENT_ALIGN)
38 #define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 32MiB
39 #define MI_MAX_ARENAS         (112)                    // not more than 126 (since we use 7 bits in the memid and an arena index + 1)
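// Worked example (illustrative only): with MI_ARENA_BLOCK_SIZE = 64MiB, a 100MiB
// request spans _mi_divide_up(100MiB, 64MiB) = 2 arena blocks (128MiB of arena
// space); per the encoding described above, an in-use `mi_block_info_t` for such
// a range would be (2 << 1) | 1 (bit 0 = in_use, upper bits = block count).
// The helpers `mi_block_count_of_size` and `mi_arena_block_size` below perform
// exactly this block rounding.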
40 
41 // A memory arena descriptor
42 typedef struct mi_arena_s {
43   mi_arena_id_t id;                       // arena id; 0 for non-specific
44   mi_memid_t memid;                       // memid of the memory area
45   _Atomic(uint8_t*) start;                // the start of the memory area
46   size_t   block_count;                   // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
47   size_t   field_count;                   // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
48   size_t   meta_size;                     // size of the arena structure itself (including its bitmaps)
49   mi_memid_t meta_memid;                  // memid of the arena structure itself (OS or static allocation)
50   int      numa_node;                     // associated NUMA node
51   bool     exclusive;                     // only allow allocations if specifically for this arena
52   bool     is_large;                      // memory area consists of large- or huge OS pages (always committed)
53   _Atomic(size_t) search_idx;             // optimization to start the search for free blocks
54   _Atomic(mi_msecs_t) purge_expire;       // expiration time when blocks marked in `blocks_purge` should be decommitted.
55   mi_bitmap_field_t* blocks_dirty;        // are the blocks potentially non-zero?
56   mi_bitmap_field_t* blocks_committed;    // are the blocks committed? (can be NULL for memory that cannot be decommitted)
57   mi_bitmap_field_t* blocks_purge;        // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
58   mi_bitmap_field_t  blocks_inuse[1];     // in-place bitmap of in-use blocks (of size `field_count`)
59 } mi_arena_t;
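// Layout note: the bitmaps live in-place directly after the struct, in the order
// inuse, dirty, and (for non-pinned memory) committed and purge, each `field_count`
// fields long; `blocks_dirty`, `blocks_committed` and `blocks_purge` are simply
// pointers into that trailing area (see `mi_manage_os_memory_ex2` below).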
60 
61 
62 // The available arenas
63 static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
64 static mi_decl_cache_align _Atomic(size_t)      mi_arena_count; // = 0
65 
66 
67 //static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept;
68 
69 /* -----------------------------------------------------------
70   Arena id's
71   id = arena_index + 1
72 ----------------------------------------------------------- */
73 
74 static size_t mi_arena_id_index(mi_arena_id_t id) {
75   return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
76 }
77 
78 static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
79   mi_assert_internal(arena_index < MI_MAX_ARENAS);
80   return (int)arena_index + 1;
81 }
82 
83 mi_arena_id_t _mi_arena_id_none(void) {
84   return 0;
85 }
86 
87 static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
88   return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
89           (arena_id == req_arena_id));
90 }
91 
92 bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
93   if (memid.memkind == MI_MEM_ARENA) {
94     return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
95   }
96   else {
97     return mi_arena_id_is_suitable(0, false, request_arena_id);
98   }
99 }
100 
101 bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
102   return (memid.memkind == MI_MEM_OS);
103 }
104 
105 /* -----------------------------------------------------------
106   Arena allocations get a (currently) 16-bit memory id where the
107   lower 8 bits are the arena id, and the upper bits the block index.
108 ----------------------------------------------------------- */
109 
110 static size_t mi_block_count_of_size(size_t size) {
111   return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
112 }
113 
114 static size_t mi_arena_block_size(size_t bcount) {
115   return (bcount * MI_ARENA_BLOCK_SIZE);
116 }
117 
118 static size_t mi_arena_size(mi_arena_t* arena) {
119   return mi_arena_block_size(arena->block_count);
120 }
121 
122 static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
123   mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
124   memid.mem.arena.id = id;
125   memid.mem.arena.block_index = bitmap_index;
126   memid.mem.arena.is_exclusive = is_exclusive;
127   return memid;
128 }
129 
130 static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
131   mi_assert_internal(memid.memkind == MI_MEM_ARENA);
132   *arena_index = mi_arena_id_index(memid.mem.arena.id);
133   *bitmap_index = memid.mem.arena.block_index;
134   return memid.mem.arena.is_exclusive;
135 }
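// Illustrative round trip (sketch): when a block range is claimed in arena `a` at
// bitmap index `bidx`, the allocation gets `mi_memid_create_arena(a->id, a->exclusive, bidx)`;
// on free, `mi_arena_memid_indices` recovers the arena index (`id - 1`) and the same
// bitmap index so `_mi_arena_free` below can unclaim exactly those bits again.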
136 
137 
138 
139 /* -----------------------------------------------------------
140   Special static area for mimalloc internal structures
141   to avoid OS calls (for example, for the arena metadata)
142 ----------------------------------------------------------- */
143 
144 #define MI_ARENA_STATIC_MAX  (MI_INTPTR_SIZE*MI_KiB)  // 8 KiB on 64-bit
145 
146 static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
147 static _Atomic(size_t) mi_arena_static_top;
148 
149 static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
150   *memid = _mi_memid_none();
151   if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
152   if ((mi_atomic_load_relaxed(&mi_arena_static_top) + size) > MI_ARENA_STATIC_MAX) return NULL;
153 
154   // try to claim space
155   if (alignment == 0) { alignment = 1; }
156   const size_t oversize = size + alignment - 1;
157   if (oversize > MI_ARENA_STATIC_MAX) return NULL;
158   const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
159   size_t top = oldtop + oversize;
160   if (top > MI_ARENA_STATIC_MAX) {
161     // try to roll back, ok if this fails
162     mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
163     return NULL;
164   }
165 
166   // success
167   *memid = _mi_memid_create(MI_MEM_STATIC);
168   const size_t start = _mi_align_up(oldtop, alignment);
169   uint8_t* const p = &mi_arena_static[start];
170   _mi_memzero(p, size);
171   return p;
172 }
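// Worked example for `mi_arena_static_zalloc` above (illustrative only): with
// `mi_arena_static_top` at 100, a request of size 200 and alignment 16 atomically
// claims oversize = 200 + 16 - 1 = 215 bytes (new top 315) and returns
// `&mi_arena_static[_mi_align_up(100,16)]` = `&mi_arena_static[112]`; the alignment
// slack within the claimed 215 bytes is simply left unused. If the add would exceed
// MI_ARENA_STATIC_MAX, a best-effort CAS rolls the top back and NULL is returned.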
173 
174 static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
175   *memid = _mi_memid_none();
176 
177   // try static
178   void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
179   if (p != NULL) return p;
180 
181   // or fall back to the OS
182   return _mi_os_alloc(size, memid, stats);
183 }
184 
185 static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) {
186   if (mi_memkind_is_os(memid.memkind)) {
187     _mi_os_free(p, size, memid, stats);
188   }
189   else {
190     mi_assert(memid.memkind == MI_MEM_STATIC);
191   }
192 }
193 
194 static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
195   return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
196 }
197 
198 
199 /* -----------------------------------------------------------
200   Thread safe allocation in an arena
201 ----------------------------------------------------------- */
202 
203 // claim the `blocks_inuse` bits
204 static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
205 {
206   size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx);  // start from last search; ok to be relaxed as the exact start does not matter
207   if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
208     mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx));  // start search from found location next time around
209     return true;
210   };
211   return false;
212 }
213 
214 
215 /* -----------------------------------------------------------
216   Arena Allocation
217 ----------------------------------------------------------- */
218 
219 static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
220                                                     bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
221 {
222   MI_UNUSED(arena_index);
223   mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
224 
225   mi_bitmap_index_t bitmap_index;
226   if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
227 
228   // claimed it!
229   void* p = mi_arena_block_start(arena, bitmap_index);
230   *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
231   memid->is_pinned = arena->memid.is_pinned;
232 
233   // none of the claimed blocks should be scheduled for a decommit
234   if (arena->blocks_purge != NULL) {
235     // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
236     _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
237   }
238 
239   // set the dirty bits (todo: no need for an atomic op here?)
240   if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
241     memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
242   }
243 
244   // set commit state
245   if (arena->blocks_committed == NULL) {
246     // always committed
247     memid->initially_committed = true;
248   }
249   else if (commit) {
250     // commit requested, but the range may not be committed as a whole: ensure it is committed now
251     memid->initially_committed = true;
252     bool any_uncommitted;
253     _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
254     if (any_uncommitted) {
255       bool commit_zero = false;
256       if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
257         memid->initially_committed = false;
258       }
259       else {
260         if (commit_zero) { memid->initially_zero = true; }
261       }
262     }
263   }
264   else {
265     // no need to commit, but check if already fully committed
266     memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
267   }
268 
269   return p;
270 }
271 
272 // allocate in a specific arena
273 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
274                                        bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
275 {
276   MI_UNUSED_RELEASE(alignment);
277   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
278   const size_t bcount = mi_block_count_of_size(size);
279   const size_t arena_index = mi_arena_id_index(arena_id);
280   mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
281   mi_assert_internal(size <= mi_arena_block_size(bcount));
282 
283   // Check arena suitability
284   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
285   if (arena == NULL) return NULL;
286   if (!allow_large && arena->is_large) return NULL;
287   if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
288   if (req_arena_id == _mi_arena_id_none()) { // if not specific, check numa affinity
289     const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
290     if (match_numa_node) { if (!numa_suitable) return NULL; }
291                     else { if (numa_suitable) return NULL; }
292   }
293 
294   // try to allocate
295   void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
296   mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
297   return p;
298 }
299 
300 
301 // allocate from an arena with fallback to the OS
302 static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
303                                                   bool commit, bool allow_large,
304                                                   mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
305 {
306   MI_UNUSED(alignment);
307   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
308   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
309   if mi_likely(max_arena == 0) return NULL;
310 
311   if (req_arena_id != _mi_arena_id_none()) {
312     // try a specific arena if requested
313     if (mi_arena_id_index(req_arena_id) < max_arena) {
314       void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
315       if (p != NULL) return p;
316     }
317   }
318   else {
319     // try numa affine allocation
320     for (size_t i = 0; i < max_arena; i++) {
321       void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
322       if (p != NULL) return p;
323     }
324 
325     // try from another numa node instead..
326     if (numa_node >= 0) {  // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
327       for (size_t i = 0; i < max_arena; i++) {
328         void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
329         if (p != NULL) return p;
330       }
331     }
332   }
333   return NULL;
334 }
335 
336 // try to reserve a fresh arena space
337 static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
338 {
339   if (_mi_preloading()) return false;  // use OS only while preloading
340   if (req_arena_id != _mi_arena_id_none()) return false;
341 
342   const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
343   if (arena_count > (MI_MAX_ARENAS - 4)) return false;
344 
345   size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
346   if (arena_reserve == 0) return false;
347 
348   if (!_mi_os_has_virtual_reserve()) {
349     arena_reserve = arena_reserve/4;  // be conservative if virtual reserve is not supported (for some embedded systems for example)
350   }
351   arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
352   if (arena_count >= 8 && arena_count <= 128) {
353     arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve;  // scale up the arena sizes exponentially
354   }
355   if (arena_reserve < req_size) return false;  // should be able to at least handle the current allocation size
356 
357   // commit eagerly?
358   bool arena_commit = false;
359   if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
360   else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
361 
362   return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
363 }
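// Worked example for `mi_arena_reserve` above (illustrative; the 64MiB base is an
// assumption, not necessarily the default): with `mi_option_arena_reserve` at 64MiB,
// the first 8 arenas are reserved at 64MiB each; once 8 arenas exist the size scales
// by 2^(arena_count/8), so with 16 arenas the next reservation would be
// 4 * 64MiB = 256MiB (and a quarter of that when the OS lacks virtual reserve support).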
364 
365 
366 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
367                               mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
368 {
369   mi_assert_internal(memid != NULL && tld != NULL);
370   mi_assert_internal(size > 0);
371   *memid = _mi_memid_none();
372 
373   const int numa_node = _mi_os_numa_node(tld); // current numa node
374 
375   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
376   if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
377     void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
378     if (p != NULL) return p;
379 
380     // otherwise, try to first eagerly reserve a new arena
381     if (req_arena_id == _mi_arena_id_none()) {
382       mi_arena_id_t arena_id = 0;
383       if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
384         // and try to allocate in there
385         mi_assert_internal(req_arena_id == _mi_arena_id_none());
386         p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
387         if (p != NULL) return p;
388       }
389     }
390   }
391 
392   // if we cannot use OS allocation, return NULL
393   if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
394     errno = ENOMEM;
395     return NULL;
396   }
397 
398   // finally, fall back to the OS
399   if (align_offset > 0) {
400     return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
401   }
402   else {
403     return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
404   }
405 }
406 
407 void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
408 {
409   return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
410 }
411 
412 
413 void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
414   if (size != NULL) *size = 0;
415   size_t arena_index = mi_arena_id_index(arena_id);
416   if (arena_index >= MI_MAX_ARENAS) return NULL;
417   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
418   if (arena == NULL) return NULL;
419   if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
420   return arena->start;
421 }
422 
423 
424 /* -----------------------------------------------------------
425   Arena purge
426 ----------------------------------------------------------- */
427 
428 static long mi_arena_purge_delay(void) {
429   // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay
430   return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
431 }
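// Worked example (assuming the common defaults of `purge_delay` = 10ms and
// `arena_purge_mult` = 10; these can differ per build/configuration): arena purges
// are delayed by about 100ms, so many frees in quick succession are batched into a
// single decommit instead of causing repeated commit/decommit system calls.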
432 
433 // reset or decommit in an arena and update the committed/decommit bitmaps
434 // assumes we own the area (i.e. blocks_inuse is claimed by us)
435 static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
436   mi_assert_internal(arena->blocks_committed != NULL);
437   mi_assert_internal(arena->blocks_purge != NULL);
438   mi_assert_internal(!arena->memid.is_pinned);
439   const size_t size = mi_arena_block_size(blocks);
440   void* const p = mi_arena_block_start(arena, bitmap_idx);
441   bool needs_recommit;
442   if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
443     // all blocks are committed, we can purge freely
444     needs_recommit = _mi_os_purge(p, size, stats);
445   }
446   else {
447     // some blocks are not committed -- this can happen when a partially committed block is freed
448     // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
449     // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
450     // and also undo the decommit stats (as it was already adjusted)
451     mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
452     needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
453     _mi_stat_increase(&stats->committed, size);
454   }
455 
456   // clear the purged blocks
457   _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
458   // update committed bitmap
459   if (needs_recommit) {
460     _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
461   }
462 }
463 
464 // Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
465 // Note: assumes we (still) own the area as we may purge immediately
466 static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
467   mi_assert_internal(arena->blocks_purge != NULL);
468   const long delay = mi_arena_purge_delay();
469   if (delay < 0) return;  // is purging allowed at all?
470 
471   if (_mi_preloading() || delay == 0) {
472     // decommit directly
473     mi_arena_purge(arena, bitmap_idx, blocks, stats);
474   }
475   else {
476     // schedule decommit
477     mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
478     if (expire != 0) {
479       mi_atomic_addi64_acq_rel(&arena->purge_expire, delay/10);  // add smallish extra delay
480     }
481     else {
482       mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
483     }
484     _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
485   }
486 }
487 
488 // purge a range of blocks
489 // return true if the full range was purged.
490 // assumes we own the area (i.e. blocks_inuse is claimed by us)
491 static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
492   const size_t endidx = startidx + bitlen;
493   size_t bitidx = startidx;
494   bool all_purged = false;
495   while (bitidx < endidx) {
496     // count consecutive ones in the purge mask
497     size_t count = 0;
498     while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
499       count++;
500     }
501     if (count > 0) {
502       // found range to be purged
503       const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
504       mi_arena_purge(arena, range_idx, count, stats);
505       if (count == bitlen) {
506         all_purged = true;
507       }
508     }
509     bitidx += (count+1); // +1 to skip the zero bit (or end)
510   }
511   return all_purged;
512 }
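// Worked example for `mi_arena_purge_range` above (illustrative only): with
// `startidx` = 0, `bitlen` = 8 and a purge mask of 0b01110110, the loop purges the
// run at bits 1-2 (2 blocks), then the run at bits 4-6 (3 blocks), and returns false
// because no single run covered the whole `bitlen` range.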
513 
514 // returns true if anything was purged
515 static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
516 {
517   if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
518   mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
519   if (expire == 0) return false;
520   if (!force && expire > now) return false;
521 
522   // reset expire (if not already set concurrently)
523   mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);
524 
525   // potential purges scheduled, walk through the bitmap
526   bool any_purged = false;
527   bool full_purge = true;
528   for (size_t i = 0; i < arena->field_count; i++) {
529     size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
530     if (purge != 0) {
531       size_t bitidx = 0;
532       while (bitidx < MI_BITMAP_FIELD_BITS) {
533         // find a consecutive range of ones in the purge mask
534         size_t bitlen = 0;
535         while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
536           bitlen++;
537         }
538         // try to claim the longest range of corresponding in_use bits
539         const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
540         while( bitlen > 0 ) {
541           if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
542             break;
543           }
544           bitlen--;
545         }
546         // actually claimed bits in `in_use`
547         if (bitlen > 0) {
548           // read purge again now that we have the in_use bits
549           purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
550           if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
551             full_purge = false;
552           }
553           any_purged = true;
554           // release the claimed `in_use` bits again
555           _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
556         }
557         bitidx += (bitlen+1);  // +1 to skip the zero (or end)
558       } // while bitidx
559     } // purge != 0
560   }
561   // if not fully purged, make sure to purge again in the future
562   if (!full_purge) {
563     const long delay = mi_arena_purge_delay();
564     mi_msecs_t expected = 0;
565     mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay);
566   }
567   return any_purged;
568 }
569 
570 static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
571   if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled
572 
573   const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
574   if (max_arena == 0) return;
575 
576   // allow only one thread to purge at a time
577   static mi_atomic_guard_t purge_guard;
578   mi_atomic_guard(&purge_guard)
579   {
580     mi_msecs_t now = _mi_clock_now();
581     size_t max_purge_count = (visit_all ? max_arena : 1);
582     for (size_t i = 0; i < max_arena; i++) {
583       mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
584       if (arena != NULL) {
585         if (mi_arena_try_purge(arena, now, force, stats)) {
586           if (max_purge_count <= 1) break;
587           max_purge_count--;
588         }
589       }
590     }
591   }
592 }
593 
594 
595 /* -----------------------------------------------------------
596   Arena free
597 ----------------------------------------------------------- */
598 
599 void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
600   mi_assert_internal(size > 0 && stats != NULL);
601   mi_assert_internal(committed_size <= size);
602   if (p==NULL) return;
603   if (size==0) return;
604   const bool all_committed = (committed_size == size);
605 
606   if (mi_memkind_is_os(memid.memkind)) {
607     // was a direct OS allocation, pass through
608     if (!all_committed && committed_size > 0) {
609       // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
610       _mi_stat_decrease(&stats->committed, committed_size);
611     }
612     _mi_os_free(p, size, memid, stats);
613   }
614   else if (memid.memkind == MI_MEM_ARENA) {
615     // allocated in an arena
616     size_t arena_idx;
617     size_t bitmap_idx;
618     mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
619     mi_assert_internal(arena_idx < MI_MAX_ARENAS);
620     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
621     mi_assert_internal(arena != NULL);
622     const size_t blocks = mi_block_count_of_size(size);
623 
624     // checks
625     if (arena == NULL) {
626       _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
627       return;
628     }
629     mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
630     if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
631       _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
632       return;
633     }
634 
635     // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
636     mi_track_mem_undefined(p,size);
637 
638     // potentially decommit
639     if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
640       mi_assert_internal(all_committed);
641     }
642     else {
643       mi_assert_internal(arena->blocks_committed != NULL);
644       mi_assert_internal(arena->blocks_purge != NULL);
645 
646       if (!all_committed) {
647         // mark the entire range as no longer committed (so we recommit the full range when re-using)
648         _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
649         mi_track_mem_noaccess(p,size);
650         if (committed_size > 0) {
651           // if partially committed, adjust the committed stats (as it will be recommitted when re-using)
652           // in the delayed purge we then must not count a decommit if the range is not marked as committed.
653           _mi_stat_decrease(&stats->committed, committed_size);
654         }
655         // note: if not all committed, the purge may reset/decommit the entire range, including
656         // parts that are already decommitted. Since the purge consistently uses either reset or
657         // decommit, that works out (as we should never reset decommitted parts).
658       }
659       // (delay) purge the entire range
660       mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
661     }
662 
663     // and make it available to others again
664     bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
665     if (!all_inuse) {
666       _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
667       return;
668     };
669   }
670   else {
671     // arena was none, external, or static; nothing to do
672     mi_assert_internal(memid.memkind < MI_MEM_OS);
673   }
674 
675   // purge expired decommits
676   mi_arenas_try_purge(false, false, stats);
677 }
678 
679 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
680 // for dynamic libraries that are unloaded and need to release all their allocated memory.
681 static void mi_arenas_unsafe_destroy(void) {
682   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
683   size_t new_max_arena = 0;
684   for (size_t i = 0; i < max_arena; i++) {
685     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
686     if (arena != NULL) {
687       if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
688         mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
689         _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
690       }
691       else {
692         new_max_arena = i;
693       }
694       mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main);
695     }
696   }
697 
698   // try to lower the max arena.
699   size_t expected = max_arena;
700   mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
701 }
702 
703 // Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
704 void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
705   mi_arenas_try_purge(force_purge, true /* visit all */, stats);
706 }
707 
708 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
709 // for dynamic libraries that are unloaded and need to release all their allocated memory.
710 void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
711   mi_arenas_unsafe_destroy();
712   _mi_arena_collect(true /* force purge */, stats);  // purge non-owned arenas
713 }
714 
715 // Is a pointer inside any of our arenas?
716 bool _mi_arena_contains(const void* p) {
717   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
718   for (size_t i = 0; i < max_arena; i++) {
719     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
720     if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
721       return true;
722     }
723   }
724   return false;
725 }
726 
727 
728 /* -----------------------------------------------------------
729   Add an arena.
730 ----------------------------------------------------------- */
731 
732 static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) {
733   mi_assert_internal(arena != NULL);
734   mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
735   mi_assert_internal(arena->block_count > 0);
736   if (arena_id != NULL) { *arena_id = -1; }
737 
738   size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
739   if (i >= MI_MAX_ARENAS) {
740     mi_atomic_decrement_acq_rel(&mi_arena_count);
741     return false;
742   }
743   arena->id = mi_arena_id_create(i);
744   mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
745   if (arena_id != NULL) { *arena_id = arena->id; }
746   return true;
747 }
748 
749 static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
750 {
751   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
752   if (size < MI_ARENA_BLOCK_SIZE) return false;
753 
754   if (is_large) {
755     mi_assert_internal(memid.initially_committed && memid.is_pinned);
756   }
757 
758   const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
759   const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
760   const size_t bitmaps = (memid.is_pinned ? 2 : 4);
761   const size_t asize  = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
762   mi_memid_t meta_memid;
763   mi_arena_t* arena   = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
764   if (arena == NULL) return false;
765 
766   // already zero'd due to os_alloc
767   // _mi_memzero(arena, asize);
768   arena->id = _mi_arena_id_none();
769   arena->memid = memid;
770   arena->exclusive = exclusive;
771   arena->meta_size = asize;
772   arena->meta_memid = meta_memid;
773   arena->block_count = bcount;
774   arena->field_count = fields;
775   arena->start = (uint8_t*)start;
776   arena->numa_node    = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
777   arena->is_large     = is_large;
778   arena->purge_expire = 0;
779   arena->search_idx   = 0;
780   arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
781   arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
782   arena->blocks_purge  = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap
783   // initialize committed bitmap?
784   if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
785     memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
786   }
787 
788   // and claim leftover blocks if needed (so we never allocate there)
789   ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
790   mi_assert_internal(post >= 0);
791   if (post > 0) {
792     // don't use leftover bits at the end
793     mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
794     _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
795   }
796   return mi_arena_add(arena, arena_id);
797 
798 }
799 
800 bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
801   mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
802   memid.initially_committed = is_committed;
803   memid.initially_zero = is_zero;
804   memid.is_pinned = is_large;
805   return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
806 }
807 
808 // Reserve a range of regular OS memory
809 int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
810   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
811   size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
812   mi_memid_t memid;
813   void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
814   if (start == NULL) return ENOMEM;
815   const bool is_large = memid.is_pinned; // todo: use separate is_large field?
816   if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
817     _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
818     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size, 1024));
819     return ENOMEM;
820   }
821   _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
822   return 0;
823 }
824 
825 
826 // Manage a range of regular OS memory
827 bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
828   return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
829 }
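// Illustrative usage of `mi_manage_os_memory` above (a sketch; the origin of `region`
// is an assumption, not part of mimalloc): an embedded application can hand a
// pre-obtained region to mimalloc instead of letting it call the OS itself. The region
// should be at least MI_ARENA_BLOCK_SIZE large and MI_SEGMENT_ALIGN aligned:
//
//   extern uint8_t* region;                              // assumed: committed, zeroed, and segment aligned
//   const size_t region_size = 4 * MI_ARENA_BLOCK_SIZE;  // 256MiB
//   mi_manage_os_memory(region, region_size, true /*committed*/, false /*large pages*/,
//                       true /*zero*/, -1 /*no numa preference*/);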
830 
831 // Reserve a range of regular OS memory
832 int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
833   return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
834 }
835 
836 
837 /* -----------------------------------------------------------
838   Debugging
839 ----------------------------------------------------------- */
840 
841 static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count ) {
842   size_t inuse_count = 0;
843   for (size_t i = 0; i < field_count; i++) {
844     char buf[MI_BITMAP_FIELD_BITS + 1];
845     uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
846     for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++) {
847       bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
848       if (inuse) inuse_count++;
849       buf[MI_BITMAP_FIELD_BITS - 1 - bit] = (inuse ? 'x' : '.');
850     }
851     buf[MI_BITMAP_FIELD_BITS] = 0;
852     _mi_verbose_message("%s%s\n", prefix, buf);
853   }
854   return inuse_count;
855 }
856 
857 void mi_debug_show_arenas(void) mi_attr_noexcept {
858   size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
859   for (size_t i = 0; i < max_arenas; i++) {
860     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
861     if (arena == NULL) break;
862     size_t inuse_count = 0;
863     _mi_verbose_message("arena %zu: %zu blocks with %zu fields\n", i, arena->block_count, arena->field_count);
864     inuse_count += mi_debug_show_bitmap("  ", arena->blocks_inuse, arena->field_count);
865     _mi_verbose_message("  blocks in use ('x'): %zu\n", inuse_count);
866   }
867 }
868 
869 
870 /* -----------------------------------------------------------
871   Reserve a huge page arena.
872 ----------------------------------------------------------- */
873 // reserve at a specific numa node
874 int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
875   if (arena_id != NULL) *arena_id = -1;
876   if (pages==0) return 0;
877   if (numa_node < -1) numa_node = -1;
878   if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
879   size_t hsize = 0;
880   size_t pages_reserved = 0;
881   mi_memid_t memid;
882   void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
883   if (p==NULL || pages_reserved==0) {
884     _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
885     return ENOMEM;
886   }
887   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
888 
889   if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
890     _mi_os_free(p, hsize, memid, &_mi_stats_main);
891     return ENOMEM;
892   }
893   return 0;
894 }
895 
896 int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
897   return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
898 }
899 
900 // reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
901 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
902   if (pages == 0) return 0;
903 
904   // pages per numa node
905   size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
906   if (numa_count <= 0) numa_count = 1;
907   const size_t pages_per = pages / numa_count;
908   const size_t pages_mod = pages % numa_count;
909   const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);
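  // Worked example (illustrative only): reserving 10 pages over 4 NUMA nodes gives
  // pages_per = 2 and pages_mod = 2, so nodes 0 and 1 receive 3 pages each and nodes
  // 2 and 3 receive 2 pages each (10 total); with a timeout, each node gets roughly
  // timeout_msecs/4 + 50ms to complete its reservation.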
910 
911   // reserve evenly among numa nodes
912   for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
913     size_t node_pages = pages_per;  // can be 0
914     if (numa_node < pages_mod) node_pages++;
915     int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
916     if (err) return err;
917     if (pages < node_pages) {
918       pages = 0;
919     }
920     else {
921       pages -= node_pages;
922     }
923   }
924 
925   return 0;
926 }
927 
928 int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
929   MI_UNUSED(max_secs);
930   _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
931   if (pages_reserved != NULL) *pages_reserved = 0;
932   int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
933   if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
934   return err;
935 }
936