/* (HTML code-browser navigation artifacts removed) */
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 #pragma once
8 #ifndef MIMALLOC_TYPES_H
9 #define MIMALLOC_TYPES_H
10 
11 // --------------------------------------------------------------------------
12 // This file contains the main type definitions for mimalloc:
13 // mi_heap_t      : all data for a thread-local heap, contains
14 //                  lists of all managed heap pages.
15 // mi_segment_t   : a larger chunk of memory (32GiB) from where pages
16 //                  are allocated.
17 // mi_page_t      : a mimalloc page (usually 64KiB or 512KiB) from
18 //                  where objects are allocated.
19 // --------------------------------------------------------------------------
20 
21 
22 #include <stddef.h>   // ptrdiff_t
23 #include <stdint.h>   // uintptr_t, uint16_t, etc
24 #include "atomic.h"   // _Atomic
25 
26 #ifdef _MSC_VER
27 #pragma warning(disable:4214) // bitfield is not int
28 #endif
29 
30 // Minimal alignment necessary. On most platforms 16 bytes are needed
31 // due to SSE registers for example. This must be at least `sizeof(void*)`
32 #ifndef MI_MAX_ALIGN_SIZE
33 #define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
34 #endif
35 
36 #define MI_CACHE_LINE          64
37 #if defined(_MSC_VER)
38 #pragma warning(disable:4127)   // suppress constant conditional warning (due to MI_SECURE paths)
39 #pragma warning(disable:26812)  // unscoped enum warning
40 #define mi_decl_noinline        __declspec(noinline)
41 #define mi_decl_thread          __declspec(thread)
42 #define mi_decl_cache_align     __declspec(align(MI_CACHE_LINE))
43 #elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
44 #define mi_decl_noinline        __attribute__((noinline))
45 #define mi_decl_thread          __thread
46 #define mi_decl_cache_align     __attribute__((aligned(MI_CACHE_LINE)))
47 #else
48 #define mi_decl_noinline
49 #define mi_decl_thread          __thread        // hope for the best :-)
50 #define mi_decl_cache_align
51 #endif
52 
53 // ------------------------------------------------------
54 // Variants
55 // ------------------------------------------------------
56 
57 // Define NDEBUG in the release version to disable assertions.
58 // #define NDEBUG
59 
60 // Define MI_TRACK_<tool> to enable tracking support
61 // #define MI_TRACK_VALGRIND 1
62 // #define MI_TRACK_ASAN     1
63 // #define MI_TRACK_ETW      1
64 
65 // Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
66 // #define MI_STAT 1
67 
68 // Define MI_SECURE to enable security mitigations
69 // #define MI_SECURE 1  // guard page around metadata
70 // #define MI_SECURE 2  // guard page around each mimalloc page
71 // #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
72 // #define MI_SECURE 4  // checks for double free. (may be more expensive)
73 
74 #if !defined(MI_SECURE)
75 #define MI_SECURE 0
76 #endif
77 
78 // Define MI_DEBUG for debug mode
79 // #define MI_DEBUG 1  // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
80 // #define MI_DEBUG 2  // + internal assertion checks
81 // #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
82 #if !defined(MI_DEBUG)
83 #if !defined(NDEBUG) || defined(_DEBUG)
84 #define MI_DEBUG 2
85 #else
86 #define MI_DEBUG 0
87 #endif
88 #endif
89 
90 // Reserve extra padding at the end of each block to be more resilient against heap block overflows.
91 // The padding can detect buffer overflow on free.
92 #if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
93 #define MI_PADDING  1
94 #endif
95 
96 // Check padding bytes; allows byte-precise buffer overflow detection
97 #if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
98 #define MI_PADDING_CHECK 1
99 #endif
100 
101 
102 // Encoded free lists allow detection of corrupted free lists
103 // and can detect buffer overflows, modify after free, and double `free`s.
104 #if (MI_SECURE>=3 || MI_DEBUG>=1)
105 #define MI_ENCODE_FREELIST  1
106 #endif
107 
108 
109 // We used to abandon huge pages but to eagerly deallocate if freed from another thread,
110 // but that makes it not possible to visit them during a heap walk or include them in a
111 // `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
112 // another thread so most memory is available until it gets properly freed by the owning thread.
113 // #define MI_HUGE_PAGE_ABANDON 1
114 
115 
116 // ------------------------------------------------------
117 // Platform specific values
118 // ------------------------------------------------------
119 
120 // ------------------------------------------------------
121 // Size of a pointer.
122 // We assume that `sizeof(void*)==sizeof(intptr_t)`
123 // and it holds for all platforms we know of.
124 //
125 // However, the C standard only requires that:
126 //  p == (void*)((intptr_t)p))
127 // but we also need:
128 //  i == (intptr_t)((void*)i)
129 // or otherwise one might define an intptr_t type that is larger than a pointer...
130 // ------------------------------------------------------
131 
132 #if INTPTR_MAX > INT64_MAX
133 # define MI_INTPTR_SHIFT (4)  // assume 128-bit  (as on arm CHERI for example)
134 #elif INTPTR_MAX == INT64_MAX
135 # define MI_INTPTR_SHIFT (3)
136 #elif INTPTR_MAX == INT32_MAX
137 # define MI_INTPTR_SHIFT (2)
138 #else
139 #error platform pointers must be 32, 64, or 128 bits
140 #endif
141 
142 #if SIZE_MAX == UINT64_MAX
143 # define MI_SIZE_SHIFT (3)
144 typedef int64_t  mi_ssize_t;
145 #elif SIZE_MAX == UINT32_MAX
146 # define MI_SIZE_SHIFT (2)
147 typedef int32_t  mi_ssize_t;
148 #else
149 #error platform objects must be 32 or 64 bits
150 #endif
151 
152 #if (SIZE_MAX/2) > LONG_MAX
153 # define MI_ZU(x)  x##ULL
154 # define MI_ZI(x)  x##LL
155 #else
156 # define MI_ZU(x)  x##UL
157 # define MI_ZI(x)  x##L
158 #endif
159 
160 #define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
161 #define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)
162 
163 #define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
164 #define MI_SIZE_BITS  (MI_SIZE_SIZE*8)
165 
166 #define MI_KiB     (MI_ZU(1024))
167 #define MI_MiB     (MI_KiB*MI_KiB)
168 #define MI_GiB     (MI_MiB*MI_KiB)
169 
170 
171 // ------------------------------------------------------
172 // Main internal data-structures
173 // ------------------------------------------------------
174 
175 // Main tuning parameters for segment and page sizes
176 // Sizes for 64-bit (usually divide by two for 32-bit)
177 #define MI_SEGMENT_SLICE_SHIFT            (13 + MI_INTPTR_SHIFT)         // 64KiB  (32KiB on 32-bit)
178 
179 #if MI_INTPTR_SIZE > 4
180 #define MI_SEGMENT_SHIFT                  ( 9 + MI_SEGMENT_SLICE_SHIFT)  // 32MiB
181 #else
182 #define MI_SEGMENT_SHIFT                  ( 7 + MI_SEGMENT_SLICE_SHIFT)  // 4MiB on 32-bit
183 #endif
184 
185 #define MI_SMALL_PAGE_SHIFT               (MI_SEGMENT_SLICE_SHIFT)       // 64KiB
186 #define MI_MEDIUM_PAGE_SHIFT              ( 3 + MI_SMALL_PAGE_SHIFT)     // 512KiB
187 
188 
189 // Derived constants
190 #define MI_SEGMENT_SIZE                   (MI_ZU(1)<<MI_SEGMENT_SHIFT)
191 #define MI_SEGMENT_ALIGN                  MI_SEGMENT_SIZE
192 #define MI_SEGMENT_MASK                   ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
193 #define MI_SEGMENT_SLICE_SIZE             (MI_ZU(1)<< MI_SEGMENT_SLICE_SHIFT)
194 #define MI_SLICES_PER_SEGMENT             (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024
195 
196 #define MI_SMALL_PAGE_SIZE                (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
197 #define MI_MEDIUM_PAGE_SIZE               (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
198 
199 #define MI_SMALL_OBJ_SIZE_MAX             (MI_SMALL_PAGE_SIZE/4)   // 8KiB on 64-bit
200 #define MI_MEDIUM_OBJ_SIZE_MAX            (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB on 64-bit
201 #define MI_MEDIUM_OBJ_WSIZE_MAX           (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
202 #define MI_LARGE_OBJ_SIZE_MAX             (MI_SEGMENT_SIZE/2)      // 32MiB on 64-bit
203 #define MI_LARGE_OBJ_WSIZE_MAX            (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
204 
205 // Maximum number of size classes. (spaced exponentially in 12.5% increments)
206 #define MI_BIN_HUGE  (73U)
207 
208 #if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
209 #error "mimalloc internal: define more bins"
210 #endif
211 
212 // Maximum slice offset (15)
213 #define MI_MAX_SLICE_OFFSET               ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
214 
215 // Used as a special value to encode block sizes in 32 bits.
216 #define MI_HUGE_BLOCK_SIZE                ((uint32_t)(2*MI_GiB))
217 
218 // blocks up to this size are always allocated aligned
219 #define MI_MAX_ALIGN_GUARANTEE            (8*MI_MAX_ALIGN_SIZE)
220 
221 // Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
222 #define MI_ALIGNMENT_MAX                  (MI_SEGMENT_SIZE >> 1)
223 
224 
225 // ------------------------------------------------------
226 // Mimalloc pages contain allocated blocks
227 // ------------------------------------------------------
228 
// Free-list links use an encoded `next` field.
// (It is only actually encoded when MI_ENCODE_FREELIST is defined.)
typedef uintptr_t  mi_encoded_t;

// Thread identifiers.
typedef size_t     mi_threadid_t;

// A free list is a chain of blocks.
typedef struct mi_block_s {
  mi_encoded_t next;   // next free block (possibly encoded; see `_mi_block_next`)
} mi_block_t;
240 
241 
// Delayed-free state, used for efficient multi-threaded free-ing.
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap's thread-delayed list
  MI_DELAYED_FREEING    = 1, // transient: another thread is currently accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimization: push on the page-local thread free queue if some block already sits in the heap's thread-delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky; only reset when a page is reclaimed
} mi_delayed_t;
249 
250 
// The `in_full` and `has_aligned` page flags share a union so the
// `mi_free` fast path can test both at once (`full_aligned == 0`).
#if !MI_TSAN
typedef union mi_page_flags_s {
  uint8_t full_aligned;       // combined view of both flags
  struct {
    uint8_t in_full : 1;      // page is in the heap's "full" queue
    uint8_t has_aligned : 1;  // page contains aligned (offset) blocks
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
  uint16_t full_aligned;
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
  } x;
} mi_page_flags_t;
#endif
271 
// Thread free list.
// The bottom 2 bits of the pointer carry the `mi_delayed_t` flags.
typedef uintptr_t mi_thread_free_t;
275 
276 // A page contains blocks of one specific size (`block_size`).
277 // Each page has three list of free blocks:
278 // `free` for blocks that can be allocated,
279 // `local_free` for freed blocks that are not yet available to `mi_malloc`
280 // `thread_free` for freed blocks by other threads
281 // The `local_free` and `thread_free` lists are migrated to the `free` list
282 // when it is exhausted. The separate `local_free` list is necessary to
283 // implement a monotonic heartbeat. The `thread_free` list is needed for
284 // avoiding atomic operations in the common case.
285 //
286 //
287 // `used - |thread_free|` == actual blocks that are in use (alive)
288 // `used - |thread_free| + |free| + |local_free| == capacity`
289 //
290 // We don't count `freed` (as |free|) but use `used` to reduce
291 // the number of memory accesses in the `mi_page_all_free` function(s).
292 //
293 // Notes:
294 // - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
295 // - Using `uint16_t` does not seem to slow things down
296 // - The size is 8 words on 64-bit which helps the page index calculations
297 //   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
298 //    and 12 are still good for address calculation)
299 // - To limit the structure size, the `xblock_size` is 32-bits only; for
300 //   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
301 // - `thread_free` uses the bottom bits as a delayed-free flags to optimize
302 //   concurrent frees where only the first concurrent free adds to the owning
303 //   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
304 //   The invariant is that no-delayed-free is only set if there is
305 //   at least one block that will be added, or as already been added, to
306 //   the owning heap `thread_delayed_free` list. This guarantees that pages
307 //   will be freed correctly even if only other threads free blocks.
308 typedef struct mi_page_s {
309   // "owned" by the segment
310   uint32_t              slice_count;       // slices in this page (0 if not a page)
311   uint32_t              slice_offset;      // distance from the actual page data slice (0 if a page)
312   uint8_t               is_committed : 1;  // `true` if the page virtual memory is committed
313   uint8_t               is_zero_init : 1;  // `true` if the page was initially zero initialized
314   uint8_t               use_qsbr : 1;      // delay page freeing using qsbr
315   uint8_t               tag : 4;           // tag from the owning heap
316   uint8_t               debug_offset;      // number of bytes to preserve when filling freed or uninitialized memory
317 
318   // layout like this to optimize access in `mi_malloc` and `mi_free`
319   uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
320   uint16_t              reserved;          // number of blocks reserved in memory
321   mi_page_flags_t       flags;             // `in_full` and `has_aligned` flags (8 bits)
322   uint8_t               free_is_zero : 1;  // `true` if the blocks in the free list are zero initialized
323   uint8_t               retire_expire : 7; // expiration count for retired blocks
324 
325   mi_block_t*           free;              // list of available free blocks (`malloc` allocates from this list)
326   uint32_t              used;              // number of blocks in use (including blocks in `local_free` and `thread_free`)
327   uint32_t              xblock_size;       // size available in each block (always `>0`)
328   mi_block_t*           local_free;        // list of deferred free blocks by this thread (migrates to `free`)
329 
330   #if (MI_ENCODE_FREELIST || MI_PADDING)
331   uintptr_t             keys[2];           // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
332   #endif
333 
334   _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
335   _Atomic(uintptr_t)        xheap;
336 
337   struct mi_page_s*     next;              // next page owned by this thread with the same `block_size`
338   struct mi_page_s*     prev;              // previous page owned by this thread with the same `block_size`
339 
340 #ifdef Py_GIL_DISABLED
341   struct llist_node     qsbr_node;
342   uint64_t              qsbr_goal;
343 #endif
344 
345   // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
346   #if MI_INTPTR_SIZE==8 && !defined(Py_GIL_DISABLED)
347   uintptr_t padding[1];
348   #endif
349 } mi_page_t;
350 
351 
352 
353 // ------------------------------------------------------
354 // Mimalloc segments contain mimalloc pages
355 // ------------------------------------------------------
356 
// Kinds of mimalloc pages.
typedef enum mi_page_kind_e {
  MI_PAGE_SMALL,    // small blocks go into 64KiB pages inside a segment
  MI_PAGE_MEDIUM,   // medium blocks go into medium pages inside a segment
  MI_PAGE_LARGE,    // larger blocks go into a page of just one block
  MI_PAGE_HUGE,     // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;

// Kinds of segments.
typedef enum mi_segment_kind_e {
  MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
  MI_SEGMENT_HUGE,   // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;
368 
369 // ------------------------------------------------------
370 // A segment holds a commit mask where a bit is set if
371 // the corresponding MI_COMMIT_SIZE area is committed.
372 // The MI_COMMIT_SIZE must be a multiple of the slice
373 // size. If it is equal we have the most fine grained
374 // decommit (but setting it higher can be more efficient).
375 // The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
376 // be committed in one go which can be set higher than
377 // MI_COMMIT_SIZE for efficiency (while the decommit mask
378 // is still tracked in fine-grained MI_COMMIT_SIZE chunks)
379 // ------------------------------------------------------
380 
381 #define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
382 #define MI_COMMIT_SIZE              (MI_SEGMENT_SLICE_SIZE)              // 64KiB
383 #define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
384 #define MI_COMMIT_MASK_FIELD_BITS    MI_SIZE_BITS
385 #define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
386 
387 #if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
388 #error "the segment size must be exactly divisible by the (commit size * size_t bits)"
389 #endif
390 
391 typedef struct mi_commit_mask_s {
392   size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
393 } mi_commit_mask_t;
394 
395 typedef mi_page_t  mi_slice_t;
396 typedef int64_t    mi_msecs_t;
397 
398 
// Memory can reside in arena's, direct OS allocated, or statically allocated.
// The memid keeps track of this.
typedef enum mi_memkind_e {
  MI_MEM_NONE,      // not allocated
  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
  MI_MEM_OS,        // allocated from the OS
  MI_MEM_OS_HUGE,   // allocated as huge os pages
  MI_MEM_OS_REMAP,  // allocated in a remapable area (i.e. using `mremap`)
  MI_MEM_ARENA      // allocated from an arena (the usual case)
} mi_memkind_t;

// Is this one of the OS-backed memory kinds (OS, OS_HUGE, OS_REMAP)?
// Relies on the three OS kinds being declared consecutively in the enum.
static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
  return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
}
413 
414 typedef struct mi_memid_os_info {
415   void*         base;               // actual base address of the block (used for offset aligned allocations)
416   size_t        alignment;          // alignment at allocation
417 } mi_memid_os_info_t;
418 
419 typedef struct mi_memid_arena_info {
420   size_t        block_index;        // index in the arena
421   mi_arena_id_t id;                 // arena id (>= 1)
422   bool          is_exclusive;       // the arena can only be used for specific arena allocations
423 } mi_memid_arena_info_t;
424 
425 typedef struct mi_memid_s {
426   union {
427     mi_memid_os_info_t    os;       // only used for MI_MEM_OS
428     mi_memid_arena_info_t arena;    // only used for MI_MEM_ARENA
429   } mem;
430   bool          is_pinned;          // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
431   bool          initially_committed;// `true` if the memory was originally allocated as committed
432   bool          initially_zero;     // `true` if the memory was originally zero initialized
433   mi_memkind_t  memkind;
434 } mi_memid_t;
435 
436 
437 // Segments are large allocated memory blocks (8mb on 64 bit) from
438 // the OS. Inside segments we allocated fixed size _pages_ that
439 // contain blocks.
440 typedef struct mi_segment_s {
441   // constant fields
442   mi_memid_t        memid;              // memory id for arena allocation
443   bool              allow_decommit;
444   bool              allow_purge;
445   size_t            segment_size;
446 
447   // segment fields
448   mi_msecs_t        purge_expire;
449   mi_commit_mask_t  purge_mask;
450   mi_commit_mask_t  commit_mask;
451 
452   _Atomic(struct mi_segment_s*) abandoned_next;
453 
454   // from here is zero initialized
455   struct mi_segment_s* next;            // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
456 
457   size_t            abandoned;          // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
458   size_t            abandoned_visits;   // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
459   size_t            used;               // count of pages in use
460   uintptr_t         cookie;             // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
461 
462   size_t            segment_slices;      // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
463   size_t            segment_info_slices; // initial slices we are using segment info and possible guard pages.
464 
465   // layout like this to optimize access in `mi_free`
466   mi_segment_kind_t kind;
467   size_t            slice_entries;       // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
468   _Atomic(mi_threadid_t) thread_id;      // unique id of the thread owning this segment
469 
470   mi_slice_t        slices[MI_SLICES_PER_SEGMENT+1];  // one more for huge blocks with large alignment
471 } mi_segment_t;
472 
473 typedef uintptr_t        mi_tagged_segment_t;
474 
475 // Segments unowned by any thread are put in a shared pool
476 typedef struct mi_abandoned_pool_s {
477   // This is a list of visited abandoned pages that were full at the time.
478   // this list migrates to `abandoned` when that becomes NULL. The use of
479   // this list reduces contention and the rate at which segments are visited.
480   mi_decl_cache_align _Atomic(mi_segment_t*)       abandoned_visited; // = NULL
481 
482   // The abandoned page list (tagged as it supports pop)
483   mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned;         // = NULL
484 
485   // Maintain these for debug purposes (these counts may be a bit off)
486   mi_decl_cache_align _Atomic(size_t)           abandoned_count;
487   mi_decl_cache_align _Atomic(size_t)           abandoned_visited_count;
488 
489   // We also maintain a count of current readers of the abandoned list
490   // in order to prevent resetting/decommitting segment memory if it might
491   // still be read.
492   mi_decl_cache_align _Atomic(size_t)           abandoned_readers; // = 0
493 } mi_abandoned_pool_t;
494 
495 
496 // ------------------------------------------------------
497 // Heaps
498 // Provide first-class heaps to allocate from.
499 // A heap just owns a set of pages for allocation and
500 // can only be allocate/reallocate from the thread that created it.
501 // Freeing blocks can be done from any thread though.
502 // Per thread, the segments are shared among its heaps.
503 // Per thread, there is always a default heap that is
504 // used for allocation; it is initialized to statically
505 // point to an empty heap to avoid initialization checks
506 // in the fast path.
507 // ------------------------------------------------------
508 
509 // Thread local data
510 typedef struct mi_tld_s mi_tld_t;
511 
512 // Pages of a certain block size are held in a queue.
513 typedef struct mi_page_queue_s {
514   mi_page_t* first;
515   mi_page_t* last;
516   size_t     block_size;
517 } mi_page_queue_t;
518 
519 #define MI_BIN_FULL  (MI_BIN_HUGE+1)
520 
// Random number generator context.
// (Struct tag `mi_random_cxt_s` kept as-is for source compatibility.)
typedef struct mi_random_cxt_s {
  uint32_t input[16];        // generator input state
  uint32_t output[16];       // buffered output words
  int      output_available; // how much of `output` is still unused (presumably; see the random implementation)
  bool     weak;             // `true` if seeded from a weak entropy source
} mi_random_ctx_t;
528 
529 
// In debug mode a padding structure follows each block to detect buffer overflows.
#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;  // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE   0
#define MI_PADDING_WSIZE  0
#endif
542 
543 #define MI_PAGES_DIRECT   (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
544 
545 
546 // A heap owns a set of pages.
547 struct mi_heap_s {
548   mi_tld_t*             tld;
549   mi_page_t*            pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
550   mi_page_queue_t       pages[MI_BIN_FULL + 1];              // queue of pages for each size class (or "bin")
551   _Atomic(mi_block_t*)  thread_delayed_free;
552   mi_threadid_t         thread_id;                           // thread this heap belongs too
553   mi_arena_id_t         arena_id;                            // arena id if the heap belongs to a specific arena (or 0)
554   uintptr_t             cookie;                              // random cookie to verify pointers (see `_mi_ptr_cookie`)
555   uintptr_t             keys[2];                             // two random keys used to encode the `thread_delayed_free` list
556   mi_random_ctx_t       random;                              // random number context used for secure allocation
557   size_t                page_count;                          // total number of pages in the `pages` queues.
558   size_t                page_retired_min;                    // smallest retired index (retired pages are fully free, but still in the page queues)
559   size_t                page_retired_max;                    // largest retired index into the `pages` array.
560   mi_heap_t*            next;                                // list of heaps per thread
561   bool                  no_reclaim;                          // `true` if this heap should not reclaim abandoned pages
562   uint8_t               tag;                                 // custom identifier for this heap
563   uint8_t               debug_offset;                        // number of bytes to preserve when filling freed or uninitialized memory
564   bool                  page_use_qsbr;                       // should freeing pages be delayed using QSBR
565 };
566 
567 
568 
569 // ------------------------------------------------------
570 // Debug
571 // ------------------------------------------------------
572 
573 #if !defined(MI_DEBUG_UNINIT)
574 #define MI_DEBUG_UNINIT     (0xD0)
575 #endif
576 #if !defined(MI_DEBUG_FREED)
577 #define MI_DEBUG_FREED      (0xDF)
578 #endif
579 #if !defined(MI_DEBUG_PADDING)
580 #define MI_DEBUG_PADDING    (0xDE)
581 #endif
582 
583 #if (MI_DEBUG)
584 // use our own assertion to print without memory allocation
585 void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
586 #define mi_assert(expr)     ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
587 #else
588 #define mi_assert(x)
589 #endif
590 
591 #if (MI_DEBUG>1)
592 #define mi_assert_internal    mi_assert
593 #else
594 #define mi_assert_internal(x)
595 #endif
596 
597 #if (MI_DEBUG>2)
598 #define mi_assert_expensive   mi_assert
599 #else
600 #define mi_assert_expensive(x)
601 #endif
602 
603 // ------------------------------------------------------
604 // Statistics
605 // ------------------------------------------------------
606 
607 #ifndef MI_STAT
608 #if (MI_DEBUG>0)
609 #define MI_STAT 2
610 #else
611 #define MI_STAT 0
612 #endif
613 #endif
614 
// A resource statistic: running total plus peak/current watermarks.
typedef struct mi_stat_count_s {
  int64_t allocated;  // total amount ever allocated
  int64_t freed;      // total amount ever freed
  int64_t peak;       // high watermark of `current`
  int64_t current;    // amount currently in use
} mi_stat_count_t;

// A simple event counter.
typedef struct mi_stat_counter_s {
  int64_t total;  // accumulated amount over all events
  int64_t count;  // number of events
} mi_stat_counter_t;
626 
627 typedef struct mi_stats_s {
628   mi_stat_count_t segments;
629   mi_stat_count_t pages;
630   mi_stat_count_t reserved;
631   mi_stat_count_t committed;
632   mi_stat_count_t reset;
633   mi_stat_count_t purged;
634   mi_stat_count_t page_committed;
635   mi_stat_count_t segments_abandoned;
636   mi_stat_count_t pages_abandoned;
637   mi_stat_count_t threads;
638   mi_stat_count_t normal;
639   mi_stat_count_t huge;
640   mi_stat_count_t large;
641   mi_stat_count_t malloc;
642   mi_stat_count_t segments_cache;
643   mi_stat_counter_t pages_extended;
644   mi_stat_counter_t mmap_calls;
645   mi_stat_counter_t commit_calls;
646   mi_stat_counter_t reset_calls;
647   mi_stat_counter_t purge_calls;
648   mi_stat_counter_t page_no_retire;
649   mi_stat_counter_t searches;
650   mi_stat_counter_t normal_count;
651   mi_stat_counter_t huge_count;
652   mi_stat_counter_t large_count;
653 #if MI_STAT>1
654   mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
655 #endif
656 } mi_stats_t;
657 
658 
659 void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
660 void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
661 void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
662 
663 #if (MI_STAT)
664 #define mi_stat_increase(stat,amount)         _mi_stat_increase( &(stat), amount)
665 #define mi_stat_decrease(stat,amount)         _mi_stat_decrease( &(stat), amount)
666 #define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
667 #else
668 #define mi_stat_increase(stat,amount)         (void)0
669 #define mi_stat_decrease(stat,amount)         (void)0
670 #define mi_stat_counter_increase(stat,amount) (void)0
671 #endif
672 
673 #define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
674 #define mi_heap_stat_increase(heap,stat,amount)  mi_stat_increase( (heap)->tld->stats.stat, amount)
675 #define mi_heap_stat_decrease(heap,stat,amount)  mi_stat_decrease( (heap)->tld->stats.stat, amount)
676 
677 // ------------------------------------------------------
678 // Thread Local data
679 // ------------------------------------------------------
680 
681 // A "span" is is an available range of slices. The span queues keep
682 // track of slice spans of at most the given `slice_count` (but more than the previous size class).
683 typedef struct mi_span_queue_s {
684   mi_slice_t* first;
685   mi_slice_t* last;
686   size_t      slice_count;
687 } mi_span_queue_t;
688 
689 #define MI_SEGMENT_BIN_MAX (35)     // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
690 
691 // OS thread local data
692 typedef struct mi_os_tld_s {
693   size_t                region_idx;   // start point for next allocation
694   mi_stats_t*           stats;        // points to tld stats
695 } mi_os_tld_t;
696 
697 
698 // Segments thread local data
699 typedef struct mi_segments_tld_s {
700   mi_span_queue_t     spans[MI_SEGMENT_BIN_MAX+1];  // free slice spans inside segments
701   size_t              count;        // current number of segments;
702   size_t              peak_count;   // peak number of segments
703   size_t              current_size; // current size of all segments
704   size_t              peak_size;    // peak size of all segments
705   mi_stats_t*         stats;        // points to tld stats
706   mi_os_tld_t*        os;           // points to os stats
707   mi_abandoned_pool_t* abandoned;   // pool of abandoned segments
708 } mi_segments_tld_t;
709 
710 // Thread local data
711 struct mi_tld_s {
712   unsigned long long  heartbeat;     // monotonic heartbeat count
713   bool                recurse;       // true if deferred was called; used to prevent infinite recursion.
714   mi_heap_t*          heap_backing;  // backing heap of this thread (cannot be deleted)
715   mi_heap_t*          heaps;         // list of heaps in this thread (so we can abandon all when the thread terminates)
716   mi_segments_tld_t   segments;      // segment tld
717   mi_os_tld_t         os;            // os tld
718   mi_stats_t          stats;         // statistics
719 };
720 
721 #endif
722