/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "util/list.h"
#include "util/macros.h"
#include "util/u_math.h"
#include "util/u_printf.h"

#include "ralloc.h"

#define CANARY 0x5A1106

#if defined(__LP64__) || defined(_WIN64)
#define HEADER_ALIGN 16
#else
#define HEADER_ALIGN 8
#endif

/* Align the header's size so that ralloc() allocations will return with the
 * same alignment as a libc malloc would have (8 on 32-bit GLIBC, 16 on
 * 64-bit), avoiding performance penalties on x86 and alignment faults on
 * ARM.
 */
struct ralloc_header
{
   alignas(HEADER_ALIGN)

#ifndef NDEBUG
   /* A canary value used to determine whether a pointer is ralloc'd. */
   unsigned canary;
   unsigned size;
#endif

   struct ralloc_header *parent;

   /* The first child (head of a linked list) */
   struct ralloc_header *child;

   /* Linked list of siblings */
   struct ralloc_header *prev;
   struct ralloc_header *next;

   void (*destructor)(void *);
};

typedef struct ralloc_header ralloc_header;

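/* A minimal usage sketch (illustrative only; the ralloc()/ralloc_array()
 * convenience macros live in ralloc.h). Freeing a context frees every
 * descendant allocation along with it:
 *
 *    void *ctx = ralloc_context(NULL);
 *    int *values = ralloc_array(ctx, int, 16);
 *    char *name = ralloc_strdup(ctx, "example");
 *    ...
 *    ralloc_free(ctx);   // also frees "values" and "name"
 */
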
static void unlink_block(ralloc_header *info);
static void unsafe_free(ralloc_header *info);

static ralloc_header *
get_header(const void *ptr)
{
   ralloc_header *info = (ralloc_header *) (((char *) ptr) -
                                            sizeof(ralloc_header));
   assert(info->canary == CANARY);
   return info;
}

#define PTR_FROM_HEADER(info) (((char *) info) + sizeof(ralloc_header))

static void
add_child(ralloc_header *parent, ralloc_header *info)
{
   if (parent != NULL) {
      info->parent = parent;
      info->next = parent->child;
      parent->child = info;

      if (info->next != NULL)
         info->next->prev = info;
   }
}

void *
ralloc_context(const void *ctx)
{
   return ralloc_size(ctx, 0);
}

void *
ralloc_size(const void *ctx, size_t size)
{
   /* Some malloc implementations don't always align to 16 bytes even on
    * 64-bit systems; from Android bionic/tests/malloc_test.cpp:
    *  - Allocations of a size that rounds up to a multiple of 16 bytes
    *    must have at least 16 byte alignment.
    *  - Allocations of a size that rounds up to a multiple of 8 bytes and
    *    not 16 bytes, are only required to have at least 8 byte alignment.
    */
   void *block = malloc(align64(size + sizeof(ralloc_header),
                                alignof(ralloc_header)));
   ralloc_header *info;
   ralloc_header *parent;

   if (unlikely(block == NULL))
      return NULL;

   info = (ralloc_header *) block;
   /* measurements have shown that calloc is slower (because of
    * the multiplication overflow checking?), so clear things
    * manually
    */
   info->parent = NULL;
   info->child = NULL;
   info->prev = NULL;
   info->next = NULL;
   info->destructor = NULL;

   parent = ctx != NULL ? get_header(ctx) : NULL;

   add_child(parent, info);

#ifndef NDEBUG
   info->canary = CANARY;
   info->size = size;
#endif

   return PTR_FROM_HEADER(info);
}

void *
rzalloc_size(const void *ctx, size_t size)
{
   void *ptr = ralloc_size(ctx, size);

   if (likely(ptr))
      memset(ptr, 0, size);

   return ptr;
}

/* helper function - assumes ptr != NULL */
static void *
resize(void *ptr, size_t size)
{
   ralloc_header *child, *old, *info;

   old = get_header(ptr);
   info = realloc(old, align64(size + sizeof(ralloc_header),
                               alignof(ralloc_header)));

   if (info == NULL)
      return NULL;

   /* Update parent and sibling's links to the reallocated node. */
   if (info != old && info->parent != NULL) {
      if (info->parent->child == old)
         info->parent->child = info;

      if (info->prev != NULL)
         info->prev->next = info;

      if (info->next != NULL)
         info->next->prev = info;
   }

   /* Update child->parent links for all children */
   for (child = info->child; child != NULL; child = child->next)
      child->parent = info;

   return PTR_FROM_HEADER(info);
}

void *
reralloc_size(const void *ctx, void *ptr, size_t size)
{
   if (unlikely(ptr == NULL))
      return ralloc_size(ctx, size);

   assert(ralloc_parent(ptr) == ctx);
   return resize(ptr, size);
}

void *
rerzalloc_size(const void *ctx, void *ptr, size_t old_size, size_t new_size)
{
   if (unlikely(ptr == NULL))
      return rzalloc_size(ctx, new_size);

   assert(ralloc_parent(ptr) == ctx);
   ptr = resize(ptr, new_size);

   /* Guard against a failed resize before zeroing the new tail. */
   if (ptr != NULL && new_size > old_size)
      memset((char *)ptr + old_size, 0, new_size - old_size);

   return ptr;
}

void *
ralloc_array_size(const void *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return ralloc_size(ctx, size * count);
}

void *
rzalloc_array_size(const void *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return rzalloc_size(ctx, size * count);
}

void *
reralloc_array_size(const void *ctx, void *ptr, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return reralloc_size(ctx, ptr, size * count);
}

void *
rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
                     unsigned old_count, unsigned new_count)
{
   if (new_count > SIZE_MAX/size)
      return NULL;

   return rerzalloc_size(ctx, ptr, size * old_count, size * new_count);
}

void
ralloc_free(void *ptr)
{
   ralloc_header *info;

   if (ptr == NULL)
      return;

   info = get_header(ptr);
   unlink_block(info);
   unsafe_free(info);
}

#ifndef NDEBUG
static size_t
ralloc_total_size_internal(const ralloc_header *info)
{
   /* Count the block itself. The size field only exists in debug builds,
    * which is why this statistic requires !NDEBUG.
    */
   size_t sum = align64(info->size + sizeof(ralloc_header),
                        alignof(ralloc_header));

   /* Recursively count children */
   ralloc_header *it = info->child;
   while (it != NULL) {
      sum += ralloc_total_size_internal(it);
      it = it->next;
   }

   return sum;
}

size_t
ralloc_total_size(const void *ptr)
{
   return ralloc_total_size_internal(get_header(ptr));
}
#endif

static void
unlink_block(ralloc_header *info)
{
   /* Unlink from parent & siblings */
   if (info->parent != NULL) {
      if (info->parent->child == info)
         info->parent->child = info->next;

      if (info->prev != NULL)
         info->prev->next = info->next;

      if (info->next != NULL)
         info->next->prev = info->prev;
   }
   info->parent = NULL;
   info->prev = NULL;
   info->next = NULL;
}

static void
unsafe_free(ralloc_header *info)
{
   /* Recursively free any children...don't waste time unlinking them. */
   ralloc_header *temp;
   while (info->child != NULL) {
      temp = info->child;
      info->child = temp->next;
      unsafe_free(temp);
   }

   /* Free the block itself.  Call the destructor first, if any. */
   if (info->destructor != NULL)
      info->destructor(PTR_FROM_HEADER(info));

   free(info);
}

void
ralloc_steal(const void *new_ctx, void *ptr)
{
   ralloc_header *info, *parent;

   if (unlikely(ptr == NULL))
      return;

   info = get_header(ptr);
   parent = new_ctx ? get_header(new_ctx) : NULL;

   unlink_block(info);

   add_child(parent, info);
}

void
ralloc_adopt(const void *new_ctx, void *old_ctx)
{
   ralloc_header *new_info, *old_info, *child;

   if (unlikely(old_ctx == NULL))
      return;

   old_info = get_header(old_ctx);
   new_info = get_header(new_ctx);

   /* If there are no children, bail. */
   if (unlikely(old_info->child == NULL))
      return;

   /* Set all the children's parent to new_ctx; get a pointer to the last child. */
   for (child = old_info->child; child->next != NULL; child = child->next) {
      child->parent = new_info;
   }
   child->parent = new_info;

   /* Connect the two lists together; parent them to new_ctx; make old_ctx empty. */
   child->next = new_info->child;
   if (child->next)
      child->next->prev = child;
   new_info->child = old_info->child;
   old_info->child = NULL;
}
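
/* Re-parenting sketch (illustrative only; "permanent_ctx" stands for any
 * existing ralloc context). ralloc_steal() moves a single allocation to a
 * new context, while ralloc_adopt() moves all of a context's children at
 * once:
 *
 *    void *tmp = ralloc_context(NULL);
 *    char *s = ralloc_strdup(tmp, "keep me");
 *    ralloc_steal(permanent_ctx, s);   // "s" now survives...
 *    ralloc_free(tmp);                 // ...freeing the scratch context
 */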

void *
ralloc_parent(const void *ptr)
{
   ralloc_header *info;

   if (unlikely(ptr == NULL))
      return NULL;

   info = get_header(ptr);
   return info->parent ? PTR_FROM_HEADER(info->parent) : NULL;
}

void
ralloc_set_destructor(const void *ptr, void (*destructor)(void *))
{
   ralloc_header *info = get_header(ptr);
   info->destructor = destructor;
}
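
/* Destructor sketch (illustrative only; "struct log" is a hypothetical
 * type). The destructor runs when the allocation, or any ancestor context,
 * is freed, before the memory itself is released. Useful for resources
 * ralloc doesn't manage:
 *
 *    static void close_file(void *ptr) { fclose(((struct log *)ptr)->fp); }
 *    ...
 *    struct log *l = ralloc(ctx, struct log);
 *    l->fp = fopen("out.txt", "w");
 *    ralloc_set_destructor(l, close_file);
 */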

void *
ralloc_memdup(const void *ctx, const void *mem, size_t n)
{
   void *ptr = ralloc_size(ctx, n);

   if (unlikely(ptr == NULL))
      return NULL;

   memcpy(ptr, mem, n);
   return ptr;
}

char *
ralloc_strdup(const void *ctx, const char *str)
{
   size_t n;
   char *ptr;

   if (unlikely(str == NULL))
      return NULL;

   n = strlen(str);
   ptr = ralloc_array(ctx, char, n + 1);
   if (unlikely(ptr == NULL))
      return NULL;

   memcpy(ptr, str, n);
   ptr[n] = '\0';
   return ptr;
}

char *
ralloc_strndup(const void *ctx, const char *str, size_t max)
{
   size_t n;
   char *ptr;

   if (unlikely(str == NULL))
      return NULL;

   n = strnlen(str, max);
   ptr = ralloc_array(ctx, char, n + 1);
   if (unlikely(ptr == NULL))
      return NULL;

   memcpy(ptr, str, n);
   ptr[n] = '\0';
   return ptr;
}

/* helper routine for strcat/strncat - n is the exact amount to copy */
static bool
cat(char **dest, const char *str, size_t n)
{
   char *both;
   size_t existing_length;
   assert(dest != NULL && *dest != NULL);

   existing_length = strlen(*dest);
   both = resize(*dest, existing_length + n + 1);
   if (unlikely(both == NULL))
      return false;

   memcpy(both + existing_length, str, n);
   both[existing_length + n] = '\0';

   *dest = both;
   return true;
}


bool
ralloc_strcat(char **dest, const char *str)
{
   return cat(dest, str, strlen(str));
}

bool
ralloc_strncat(char **dest, const char *str, size_t n)
{
   return cat(dest, str, strnlen(str, n));
}

bool
ralloc_str_append(char **dest, const char *str,
                  size_t existing_length, size_t str_size)
{
   char *both;
   assert(dest != NULL && *dest != NULL);

   both = resize(*dest, existing_length + str_size + 1);
   if (unlikely(both == NULL))
      return false;

   memcpy(both + existing_length, str, str_size);
   both[existing_length + str_size] = '\0';

   *dest = both;

   return true;
}

char *
ralloc_asprintf(const void *ctx, const char *fmt, ...)
{
   char *ptr;
   va_list args;
   va_start(args, fmt);
   ptr = ralloc_vasprintf(ctx, fmt, args);
   va_end(args);
   return ptr;
}

char *
ralloc_vasprintf(const void *ctx, const char *fmt, va_list args)
{
   size_t size = u_printf_length(fmt, args) + 1;

   char *ptr = ralloc_size(ctx, size);
   if (ptr != NULL)
      vsnprintf(ptr, size, fmt, args);

   return ptr;
}

bool
ralloc_asprintf_append(char **str, const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = ralloc_vasprintf_append(str, fmt, args);
   va_end(args);
   return success;
}

bool
ralloc_vasprintf_append(char **str, const char *fmt, va_list args)
{
   size_t existing_length;
   assert(str != NULL);
   existing_length = *str ? strlen(*str) : 0;
   return ralloc_vasprintf_rewrite_tail(str, &existing_length, fmt, args);
}

bool
ralloc_asprintf_rewrite_tail(char **str, size_t *start, const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = ralloc_vasprintf_rewrite_tail(str, start, fmt, args);
   va_end(args);
   return success;
}

bool
ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
                              va_list args)
{
   size_t new_length;
   char *ptr;

   assert(str != NULL);

   if (unlikely(*str == NULL)) {
      /* Assuming a NULL context is probably bad, but it's expected behavior. */
      *str = ralloc_vasprintf(NULL, fmt, args);
      *start = strlen(*str);
      return true;
   }

   new_length = u_printf_length(fmt, args);

   ptr = resize(*str, *start + new_length + 1);
   if (unlikely(ptr == NULL))
      return false;

   vsnprintf(ptr + *start, new_length + 1, fmt, args);
   *str = ptr;
   *start += new_length;
   return true;
}
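
/* Tail-rewrite sketch (illustrative only): *start tracks where new text
 * should land, so repeated appends avoid re-scanning the string, and
 * rewinding *start lets a caller overwrite a previously printed tail:
 *
 *    char *s = ralloc_strdup(ctx, "x = ");
 *    size_t pos = strlen(s);
 *    ralloc_asprintf_rewrite_tail(&s, &pos, "%d", 10);   // "x = 10"
 *    pos = 4;
 *    ralloc_asprintf_rewrite_tail(&s, &pos, "%d", 42);   // "x = 42"
 */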

/***************************************************************************
 * GC context.
 ***************************************************************************
 */

/* The maximum size of an object that will be allocated from a slab
 * freelist; larger objects are allocated directly with ralloc.
 */
#define MAX_FREELIST_SIZE 512

/* Allocations small enough to be allocated from a freelist will be aligned up
 * to this size.
 */
#define FREELIST_ALIGNMENT 32

#define NUM_FREELIST_BUCKETS (MAX_FREELIST_SIZE / FREELIST_ALIGNMENT)

/* The size of a slab. */
#define SLAB_SIZE (32 * 1024)

#define GC_CONTEXT_CANARY 0xAF6B6C83
#define GC_CANARY 0xAF6B5B72

enum gc_flags {
   IS_USED = (1 << 0),
   CURRENT_GENERATION = (1 << 1),
   IS_PADDING = (1 << 7),
};

typedef struct
{
#ifndef NDEBUG
   /* A canary value used to determine whether a pointer is allocated using gc_alloc. */
   unsigned canary;
#endif

   uint16_t slab_offset;
   uint8_t bucket;
   uint8_t flags;

   /* The last padding byte must have IS_PADDING set and is used to store the amount of padding. If
    * there is no padding, the IS_PADDING bit of "flags" is unset and "flags" is checked instead.
    * Because of this, "flags" must be the last member of this struct.
    */
   uint8_t padding[];
} gc_block_header;

/* This structure is at the start of the slab. Objects inside a slab are
 * allocated using a freelist backed by a simple linear allocator.
 */
typedef struct gc_slab {
   alignas(HEADER_ALIGN)

   gc_ctx *ctx;

   /* Objects are allocated using either linear or freelist allocation. "next_available" is the
    * pointer used for linear allocation, while "freelist" is the next free object for freelist
    * allocation.
    */
   char *next_available;
   gc_block_header *freelist;

   /* Slabs that handle the same-sized objects. */
   struct list_head link;

   /* Free slabs that handle the same-sized objects. */
   struct list_head free_link;

   /* Number of allocated and free objects, recorded so that we can free the slab if it
    * becomes empty or add one to the freelist if it's no longer full.
    */
   unsigned num_allocated;
   unsigned num_free;
} gc_slab;

struct gc_ctx {
#ifndef NDEBUG
   unsigned canary;
#endif

   /* Array of slabs for fixed-size allocations. Each slab tracks allocations
    * of specific sized blocks. User allocations are rounded up to the nearest
    * fixed size. slabs[N] contains allocations of size
    * FREELIST_ALIGNMENT * (N + 1).
    */
   struct {
      /* List of slabs in this bucket. */
      struct list_head slabs;

      /* List of slabs with free space in this bucket, so we can quickly choose one when
       * allocating.
       */
      struct list_head free_slabs;
   } slabs[NUM_FREELIST_BUCKETS];

   uint8_t current_gen;
   void *rubbish;
};

static gc_block_header *
get_gc_header(const void *ptr)
{
   uint8_t *c_ptr = (uint8_t *)ptr;

   /* Adjust for padding added to ensure alignment of the allocation. There might also be padding
    * added by the compiler into gc_block_header, but that isn't counted in the IS_PADDING byte.
    */
   if (c_ptr[-1] & IS_PADDING)
      c_ptr -= c_ptr[-1] & ~IS_PADDING;

   c_ptr -= sizeof(gc_block_header);

   gc_block_header *info = (gc_block_header *)c_ptr;
   assert(info->canary == GC_CANARY);
   return info;
}
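
/* Worked example (illustrative; assumes a 64-bit debug build where
 * sizeof(gc_block_header) is 8): for a 16-byte-aligned allocation the
 * header is padded out to 16 bytes, and the byte just before the user
 * pointer stores IS_PADDING | 8. get_gc_header() reads that byte, steps
 * back those 8 padding bytes, then back sizeof(gc_block_header) more to
 * reach the start of the header.
 */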

static gc_block_header *
get_gc_freelist_next(gc_block_header *ptr)
{
   gc_block_header *next;
   /* work around possible strict aliasing bug using memcpy */
   memcpy(&next, (void*)(ptr + 1), sizeof(next));
   return next;
}

static void
set_gc_freelist_next(gc_block_header *ptr, gc_block_header *next)
{
   memcpy((void*)(ptr + 1), &next, sizeof(next));
}

static gc_slab *
get_gc_slab(gc_block_header *header)
{
   return (gc_slab *)((char *)header - header->slab_offset);
}

gc_ctx *
gc_context(const void *parent)
{
   gc_ctx *ctx = rzalloc(parent, gc_ctx);
   for (unsigned i = 0; i < NUM_FREELIST_BUCKETS; i++) {
      list_inithead(&ctx->slabs[i].slabs);
      list_inithead(&ctx->slabs[i].free_slabs);
   }
#ifndef NDEBUG
   ctx->canary = GC_CONTEXT_CANARY;
#endif
   return ctx;
}

static_assert(UINT32_MAX >= MAX_FREELIST_SIZE, "Freelist sizes use uint32_t");

static uint32_t
gc_bucket_obj_size(uint32_t bucket)
{
   return (bucket + 1) * FREELIST_ALIGNMENT;
}

static uint32_t
gc_bucket_for_size(uint32_t size)
{
   return (size - 1) / FREELIST_ALIGNMENT;
}
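
/* Bucket math example (illustrative): with FREELIST_ALIGNMENT = 32, a
 * 40-byte request maps to gc_bucket_for_size(40) = (40 - 1) / 32 = 1, and
 * that bucket serves objects of gc_bucket_obj_size(1) = 64 bytes; i.e.
 * sizes are rounded up to the next multiple of 32.
 */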

static_assert(UINT32_MAX >= SLAB_SIZE, "SLAB_SIZE uses uint32_t");

static uint32_t
gc_bucket_num_objs(uint32_t bucket)
{
   return (SLAB_SIZE - sizeof(gc_slab)) / gc_bucket_obj_size(bucket);
}

static gc_block_header *
alloc_from_slab(gc_slab *slab, uint32_t bucket)
{
   uint32_t size = gc_bucket_obj_size(bucket);
   gc_block_header *header;
   if (slab->freelist) {
      /* Prioritize already-allocated chunks, since they probably have a page
       * backing them.
       */
      header = slab->freelist;
      slab->freelist = get_gc_freelist_next(slab->freelist);
   } else if (slab->next_available + size <= ((char *) slab) + SLAB_SIZE) {
      header = (gc_block_header *) slab->next_available;
      header->slab_offset = (char *) header - (char *) slab;
      header->bucket = bucket;
      slab->next_available += size;
   } else {
      return NULL;
   }

   slab->num_allocated++;
   slab->num_free--;
   if (!slab->num_free)
      list_del(&slab->free_link);
   return header;
}

static void
free_slab(gc_slab *slab)
{
   if (list_is_linked(&slab->free_link))
      list_del(&slab->free_link);
   list_del(&slab->link);
   ralloc_free(slab);
}

static void
free_from_slab(gc_block_header *header, bool keep_empty_slabs)
{
   gc_slab *slab = get_gc_slab(header);

   if (slab->num_allocated == 1 && !(keep_empty_slabs && list_is_singular(&slab->free_link))) {
      /* Free the slab if this is the last object. */
      free_slab(slab);
      return;
   } else if (slab->num_free == 0) {
      list_add(&slab->free_link, &slab->ctx->slabs[header->bucket].free_slabs);
   } else {
      /* Keep the free list sorted by the number of free objects in ascending order. By preferring
       * to allocate from the slab with the fewest free objects, we help free the slabs with many
       * free objects.
       */
      while (slab->free_link.next != &slab->ctx->slabs[header->bucket].free_slabs &&
             slab->num_free > list_entry(slab->free_link.next, gc_slab, free_link)->num_free) {
         gc_slab *next = list_entry(slab->free_link.next, gc_slab, free_link);

         /* Move "slab" to after "next". */
         list_move_to(&slab->free_link, &next->free_link);
      }
   }

   set_gc_freelist_next(header, slab->freelist);
   slab->freelist = header;

   slab->num_allocated--;
   slab->num_free++;
}

static uint32_t
get_slab_size(uint32_t bucket)
{
   /* SLAB_SIZE rounded down to a multiple of the object size so that it's not larger than what can
    * be used.
    */
   uint32_t obj_size = gc_bucket_obj_size(bucket);
   uint32_t num_objs = gc_bucket_num_objs(bucket);
   return align((uint32_t)sizeof(gc_slab) + num_objs * obj_size, alignof(gc_slab));
}

static gc_slab *
create_slab(gc_ctx *ctx, unsigned bucket)
{
   gc_slab *slab = ralloc_size(ctx, get_slab_size(bucket));
   if (unlikely(!slab))
      return NULL;

   slab->ctx = ctx;
   slab->freelist = NULL;
   slab->next_available = (char*)(slab + 1);
   slab->num_allocated = 0;
   slab->num_free = gc_bucket_num_objs(bucket);

   list_addtail(&slab->link, &ctx->slabs[bucket].slabs);
   list_addtail(&slab->free_link, &ctx->slabs[bucket].free_slabs);

   return slab;
}

void *
gc_alloc_size(gc_ctx *ctx, size_t size, size_t alignment)
{
   assert(ctx);
   assert(util_is_power_of_two_nonzero_uintptr(alignment));

   alignment = MAX2(alignment, alignof(gc_block_header));

   /* Alignment will add at most align-alignof(gc_block_header) bytes of padding to the header, and
    * the IS_PADDING byte can only encode up to 127.
    */
   assert((alignment - alignof(gc_block_header)) <= 127);

   /* We can only align as high as the slab is. */
   assert(alignment <= HEADER_ALIGN);

   size_t header_size = align64(sizeof(gc_block_header), alignment);
   size = align64(size, alignment);
   size += header_size;

   gc_block_header *header = NULL;
   if (size <= MAX_FREELIST_SIZE) {
      uint32_t bucket = gc_bucket_for_size((uint32_t)size);
      if (list_is_empty(&ctx->slabs[bucket].free_slabs) && !create_slab(ctx, bucket))
         return NULL;
      gc_slab *slab = list_first_entry(&ctx->slabs[bucket].free_slabs, gc_slab, free_link);
      header = alloc_from_slab(slab, bucket);
   } else {
      header = ralloc_size(ctx, size);
      if (unlikely(!header))
         return NULL;
      /* Mark the header as allocated directly, so we know to actually free it. */
      header->bucket = NUM_FREELIST_BUCKETS;
   }

   header->flags = ctx->current_gen | IS_USED;
#ifndef NDEBUG
   header->canary = GC_CANARY;
#endif

   uint8_t *ptr = (uint8_t *)header + header_size;
   if ((header_size - 1) != offsetof(gc_block_header, flags))
      ptr[-1] = IS_PADDING | (header_size - sizeof(gc_block_header));

   assert(((uintptr_t)ptr & (alignment - 1)) == 0);
   return ptr;
}

void *
gc_zalloc_size(gc_ctx *ctx, size_t size, size_t alignment)
{
   void *ptr = gc_alloc_size(ctx, size, alignment);

   if (likely(ptr))
      memset(ptr, 0, size);

   return ptr;
}

void
gc_free(void *ptr)
{
   if (!ptr)
      return;

   gc_block_header *header = get_gc_header(ptr);
   header->flags &= ~IS_USED;

   if (header->bucket < NUM_FREELIST_BUCKETS)
      free_from_slab(header, true);
   else
      ralloc_free(header);
}

gc_ctx *gc_get_context(void *ptr)
{
   gc_block_header *header = get_gc_header(ptr);

   if (header->bucket < NUM_FREELIST_BUCKETS)
      return get_gc_slab(header)->ctx;
   else
      return ralloc_parent(header);
}

void
gc_sweep_start(gc_ctx *ctx)
{
   ctx->current_gen ^= CURRENT_GENERATION;

   ctx->rubbish = ralloc_context(NULL);
   ralloc_adopt(ctx->rubbish, ctx);
}

void
gc_mark_live(gc_ctx *ctx, const void *mem)
{
   gc_block_header *header = get_gc_header(mem);
   if (header->bucket < NUM_FREELIST_BUCKETS)
      header->flags ^= CURRENT_GENERATION;
   else
      ralloc_steal(ctx, header);
}

void
gc_sweep_end(gc_ctx *ctx)
{
   assert(ctx->rubbish);

   for (unsigned i = 0; i < NUM_FREELIST_BUCKETS; i++) {
      unsigned obj_size = gc_bucket_obj_size(i);
      list_for_each_entry_safe(gc_slab, slab, &ctx->slabs[i].slabs, link) {
         if (!slab->num_allocated) {
            free_slab(slab);
            continue;
         }

         for (char *ptr = (char*)(slab + 1); ptr != slab->next_available; ptr += obj_size) {
            gc_block_header *header = (gc_block_header *)ptr;
            if (!(header->flags & IS_USED))
               continue;
            if ((header->flags & CURRENT_GENERATION) == ctx->current_gen)
               continue;

            bool last = slab->num_allocated == 1;

            header->flags &= ~IS_USED;
            free_from_slab(header, false);

            if (last)
               break;
         }
      }
   }

   for (unsigned i = 0; i < NUM_FREELIST_BUCKETS; i++) {
      list_for_each_entry(gc_slab, slab, &ctx->slabs[i].slabs, link) {
         assert(slab->num_allocated > 0); /* free_from_slab() should free it otherwise */
         ralloc_steal(ctx, slab);
      }
   }

   ralloc_free(ctx->rubbish);
   ctx->rubbish = NULL;
}
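
/* Mark-and-sweep sketch (illustrative only): between gc_sweep_start() and
 * gc_sweep_end(), every gc allocation that is not marked live is freed.
 *
 *    gc_ctx *gc = gc_context(NULL);
 *    void *a = gc_alloc_size(gc, 64, 8);
 *    void *b = gc_alloc_size(gc, 64, 8);
 *    gc_sweep_start(gc);
 *    gc_mark_live(gc, a);
 *    gc_sweep_end(gc);   // "b" is reclaimed, "a" survives
 */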

/***************************************************************************
 * Linear allocator for short-lived allocations.
 ***************************************************************************
 *
 * The allocator consists of a parent node (2K buffer), which requires
 * a ralloc parent, and child nodes (allocations). Child nodes can't be freed
 * directly, because the parent doesn't track them. You have to release
 * the parent node in order to release all its children.
 *
 * The allocator uses a fixed-size buffer with a monotonically increasing
 * offset after each allocation. Once the buffer is exhausted, another buffer
 * is allocated, using the linear parent node as its ralloc parent.
 *
 * The linear parent node is always the first buffer and keeps track of all
 * other buffers.
 */
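
/* Usage sketch (illustrative only): allocations are bump-pointer cheap and
 * are released all at once with the context.
 *
 *    void *ctx = ralloc_context(NULL);
 *    linear_ctx *lin = linear_context(ctx);
 *    char *tmp = linear_alloc_child(lin, 128);
 *    ...
 *    linear_free_context(lin);   // releases every child buffer at once
 */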

#define SUBALLOC_ALIGNMENT 8
#define LMAGIC_CONTEXT 0x87b9c7d3
#define LMAGIC_NODE    0x87b910d3

struct linear_ctx {

   alignas(HEADER_ALIGN)

#ifndef NDEBUG
   unsigned magic;   /* for debugging */
#endif
   unsigned min_buffer_size;

   unsigned offset;  /* points to the first unused byte in the latest buffer */
   unsigned size;    /* size of the latest buffer */
   void *latest;     /* the only buffer that has free space */
};

typedef struct linear_ctx linear_ctx;

#ifndef NDEBUG
struct linear_node_canary {
   alignas(HEADER_ALIGN)
   unsigned magic;
   unsigned offset;  /* points to the first unused byte in *this* buffer */
};

typedef struct linear_node_canary linear_node_canary;

static linear_node_canary *
get_node_canary(void *ptr)
{
   return (void *)((char *)ptr - sizeof(linear_node_canary));
}
#endif

static unsigned
get_node_canary_size(void)
{
#ifndef NDEBUG
   return sizeof(linear_node_canary);
#else
   return 0;
#endif
}

void *
linear_alloc_child(linear_ctx *ctx, unsigned size)
{
   assert(ctx->magic == LMAGIC_CONTEXT);
   assert(get_node_canary(ctx->latest)->magic == LMAGIC_NODE);
   assert(get_node_canary(ctx->latest)->offset == ctx->offset);

   size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);

   if (unlikely(ctx->offset + size > ctx->size)) {
      /* allocate a new node */
      unsigned node_size = size;
      if (likely(node_size < ctx->min_buffer_size))
         node_size = ctx->min_buffer_size;

      const unsigned canary_size = get_node_canary_size();
      const unsigned full_size = canary_size + node_size;

      /* linear context is also a ralloc context */
      char *ptr = ralloc_size(ctx, full_size);
      if (unlikely(!ptr))
         return NULL;

#ifndef NDEBUG
      linear_node_canary *canary = (void *) ptr;
      canary->magic = LMAGIC_NODE;
      canary->offset = 0;
#endif

      /* If the new buffer is going to be full, don't update the `latest`
       * pointer.  Either the current one is also full, so it doesn't
       * matter, or the current one is not full, so there's still a chance
       * to use that space.
       */
      if (unlikely(size == node_size)) {
#ifndef NDEBUG
         canary->offset = size;
#endif
         assert((uintptr_t)(ptr + canary_size) % SUBALLOC_ALIGNMENT == 0);
         return ptr + canary_size;
      }

      ctx->offset = 0;
      ctx->size = node_size;
      ctx->latest = ptr + canary_size;
   }

   void *ptr = (char *)ctx->latest + ctx->offset;
   ctx->offset += size;

#ifndef NDEBUG
   linear_node_canary *canary = get_node_canary(ctx->latest);
   canary->offset += size;
#endif

   assert((uintptr_t)ptr % SUBALLOC_ALIGNMENT == 0);
   return ptr;
}

linear_ctx *
linear_context(void *ralloc_ctx)
{
   const linear_opts opts = {0};
   return linear_context_with_opts(ralloc_ctx, &opts);
}

linear_ctx *
linear_context_with_opts(void *ralloc_ctx, const linear_opts *opts)
{
   linear_ctx *ctx;

   if (unlikely(!ralloc_ctx))
      return NULL;

   const unsigned default_min_buffer_size = 2048;
   const unsigned min_buffer_size =
      MAX2(ALIGN_POT(opts->min_buffer_size, default_min_buffer_size),
           default_min_buffer_size);

   const unsigned size = min_buffer_size;
   const unsigned canary_size = get_node_canary_size();
   const unsigned full_size =
      sizeof(linear_ctx) + canary_size + size;

   ctx = ralloc_size(ralloc_ctx, full_size);
   if (unlikely(!ctx))
      return NULL;

   ctx->min_buffer_size = min_buffer_size;

   ctx->offset = 0;
   ctx->size = size;
   ctx->latest = (char *)&ctx[1] + canary_size;
#ifndef NDEBUG
   ctx->magic = LMAGIC_CONTEXT;
   linear_node_canary *canary = get_node_canary(ctx->latest);
   canary->magic = LMAGIC_NODE;
   canary->offset = 0;
#endif

   return ctx;
}
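
/* Options sketch (illustrative only): linear_opts lets callers raise the
 * minimum buffer size for contexts that expect many or large allocations.
 *
 *    const linear_opts opts = { .min_buffer_size = 64 * 1024 };
 *    linear_ctx *lin = linear_context_with_opts(ctx, &opts);
 */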

void *
linear_zalloc_child(linear_ctx *ctx, unsigned size)
{
   void *ptr = linear_alloc_child(ctx, size);

   if (likely(ptr))
      memset(ptr, 0, size);
   return ptr;
}

void
linear_free_context(linear_ctx *ctx)
{
   if (unlikely(!ctx))
      return;

   assert(ctx->magic == LMAGIC_CONTEXT);

   /* Linear context is also the ralloc parent of extra nodes. */
   ralloc_free(ctx);
}

void
ralloc_steal_linear_context(void *new_ralloc_ctx, linear_ctx *ctx)
{
   if (unlikely(!ctx))
      return;

   assert(ctx->magic == LMAGIC_CONTEXT);

   /* Linear context is also the ralloc parent of extra nodes. */
   ralloc_steal(new_ralloc_ctx, ctx);
}

void *
ralloc_parent_of_linear_context(linear_ctx *ctx)
{
   assert(ctx->magic == LMAGIC_CONTEXT);
   return PTR_FROM_HEADER(get_header(ctx)->parent);
}

/* All code below is pretty much copied from ralloc and only the alloc
 * calls are different.
 */

char *
linear_strdup(linear_ctx *ctx, const char *str)
{
   unsigned n;
   char *ptr;

   if (unlikely(!str))
      return NULL;

   n = strlen(str);
   ptr = linear_alloc_child(ctx, n + 1);
   if (unlikely(!ptr))
      return NULL;

   memcpy(ptr, str, n);
   ptr[n] = '\0';
   return ptr;
}

char *
linear_asprintf(linear_ctx *ctx, const char *fmt, ...)
{
   char *ptr;
   va_list args;
   va_start(args, fmt);
   ptr = linear_vasprintf(ctx, fmt, args);
   va_end(args);
   return ptr;
}

char *
linear_vasprintf(linear_ctx *ctx, const char *fmt, va_list args)
{
   unsigned size = u_printf_length(fmt, args) + 1;

   char *ptr = linear_alloc_child(ctx, size);
   if (ptr != NULL)
      vsnprintf(ptr, size, fmt, args);

   return ptr;
}

bool
linear_asprintf_append(linear_ctx *ctx, char **str, const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = linear_vasprintf_append(ctx, str, fmt, args);
   va_end(args);
   return success;
}

bool
linear_vasprintf_append(linear_ctx *ctx, char **str, const char *fmt, va_list args)
{
   size_t existing_length;
   assert(str != NULL);
   existing_length = *str ? strlen(*str) : 0;
   return linear_vasprintf_rewrite_tail(ctx, str, &existing_length, fmt, args);
}

bool
linear_asprintf_rewrite_tail(linear_ctx *ctx, char **str, size_t *start,
                             const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = linear_vasprintf_rewrite_tail(ctx, str, start, fmt, args);
   va_end(args);
   return success;
}

bool
linear_vasprintf_rewrite_tail(linear_ctx *ctx, char **str, size_t *start,
                              const char *fmt, va_list args)
{
   size_t new_length;
   char *ptr;

   assert(str != NULL);

   if (unlikely(*str == NULL)) {
      *str = linear_vasprintf(ctx, fmt, args);
      *start = strlen(*str);
      return true;
   }

   new_length = u_printf_length(fmt, args);

   ptr = linear_alloc_child(ctx, *start + new_length + 1);
   if (unlikely(ptr == NULL))
      return false;

   memcpy(ptr, *str, *start);

   vsnprintf(ptr + *start, new_length + 1, fmt, args);
   *str = ptr;
   *start += new_length;
   return true;
}

/* helper routine for strcat/strncat - n is the exact amount to copy */
static bool
linear_cat(linear_ctx *ctx, char **dest, const char *str, unsigned n)
{
   char *both;
   unsigned existing_length;
   assert(dest != NULL && *dest != NULL);

   existing_length = strlen(*dest);
   both = linear_alloc_child(ctx, existing_length + n + 1);
   if (unlikely(both == NULL))
      return false;

   memcpy(both, *dest, existing_length);
   memcpy(both + existing_length, str, n);
   both[existing_length + n] = '\0';

   *dest = both;
   return true;
}

bool
linear_strcat(linear_ctx *ctx, char **dest, const char *str)
{
   return linear_cat(ctx, dest, str, strlen(str));
}

void *
linear_alloc_child_array(linear_ctx *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return linear_alloc_child(ctx, size * count);
}

void *
linear_zalloc_child_array(linear_ctx *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return linear_zalloc_child(ctx, size * count);
}

typedef struct {
   FILE *f;
   unsigned indent;

   unsigned ralloc_count;
   unsigned linear_count;
   unsigned gc_count;

   /* These don't include padding or metadata from suballocators. */
   unsigned content_bytes;
   unsigned ralloc_metadata_bytes;
   unsigned linear_metadata_bytes;
   unsigned gc_metadata_bytes;

   bool inside_linear;
   bool inside_gc;
} ralloc_print_info_state;

static void
ralloc_print_info_helper(ralloc_print_info_state *state, const ralloc_header *info)
{
   FILE *f = state->f;

   if (f) {
      for (unsigned i = 0; i < state->indent; i++) fputc(' ', f);
      fprintf(f, "%p", info);
   }

   /* TODO: Account for padding used in various places. */

#ifndef NDEBUG
   assert(info->canary == CANARY);
   if (f) fprintf(f, " (%u bytes)", info->size);
   state->content_bytes += info->size;
   state->ralloc_metadata_bytes += sizeof(ralloc_header);

   const void *ptr = PTR_FROM_HEADER(info);
   const linear_ctx *lin_ctx = ptr;
   const gc_ctx *gc_ctx = ptr;

   if (lin_ctx->magic == LMAGIC_CONTEXT) {
      if (f) fprintf(f, " (linear context)");
      assert(!state->inside_gc && !state->inside_linear);
      state->inside_linear = true;
      state->linear_metadata_bytes += sizeof(linear_ctx);
      state->content_bytes -= sizeof(linear_ctx);
      state->linear_count++;
   } else if (gc_ctx->canary == GC_CONTEXT_CANARY) {
      if (f) fprintf(f, " (gc context)");
      assert(!state->inside_gc && !state->inside_linear);
      state->inside_gc = true;
      state->gc_metadata_bytes += sizeof(gc_block_header);
   } else if (state->inside_linear) {
      const linear_node_canary *lin_node = ptr;
      if (lin_node->magic == LMAGIC_NODE) {
         if (f) fprintf(f, " (linear node buffer)");
         state->content_bytes -= sizeof(linear_node_canary);
         state->linear_metadata_bytes += sizeof(linear_node_canary);
         state->linear_count++;
      }
   } else if (state->inside_gc) {
      if (f) fprintf(f, " (gc slab or large block)");
      state->gc_count++;
   }
#endif

   state->ralloc_count++;
   if (f) fprintf(f, "\n");

   const ralloc_header *c = info->child;
   state->indent += 2;
   while (c != NULL) {
      ralloc_print_info_helper(state, c);
      c = c->next;
   }
   state->indent -= 2;

#ifndef NDEBUG
   if (lin_ctx->magic == LMAGIC_CONTEXT) state->inside_linear = false;
   else if (gc_ctx->canary == GC_CONTEXT_CANARY) state->inside_gc = false;
#endif
}

void
ralloc_print_info(FILE *f, const void *p, unsigned flags)
{
   ralloc_print_info_state state = {
      /* Test the flag bit itself rather than comparing the masked value
       * against 1, which only works if the flag happens to be bit 0.
       */
      .f = (flags & RALLOC_PRINT_INFO_SUMMARY_ONLY) ? NULL : f,
   };

   const ralloc_header *info = get_header(p);
   ralloc_print_info_helper(&state, info);

   fprintf(f, "==== RALLOC INFO ptr=%p info=%p\n"
              "ralloc allocations    = %u\n"
              "  - linear            = %u\n"
              "  - gc                = %u\n"
              "  - other             = %u\n",
              p, info,
              state.ralloc_count,
              state.linear_count,
              state.gc_count,
              state.ralloc_count - state.linear_count - state.gc_count);

   if (state.content_bytes) {
      fprintf(f,
              "content bytes         = %u\n"
              "ralloc metadata bytes = %u\n"
              "linear metadata bytes = %u\n",
              state.content_bytes,
              state.ralloc_metadata_bytes,
              state.linear_metadata_bytes);
   }

   fprintf(f, "====\n");
}