1 /* GLIB sliced memory - fast concurrent memory chunk allocator
2  * Copyright (C) 2005 Tim Janik
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
16  */
17 /* MT safe */
18 
19 #include "config.h"
20 #include "glibconfig.h"
21 
22 #if defined(HAVE_POSIX_MEMALIGN) && !defined(_XOPEN_SOURCE)
23 #define _XOPEN_SOURCE 600       /* posix_memalign() */
24 #endif
25 #include <stdlib.h>             /* posix_memalign() */
26 #include <string.h>
27 #include <errno.h>
28 
29 #ifdef G_OS_UNIX
30 #include <unistd.h>             /* sysconf() */
31 #endif
32 #ifdef G_OS_WIN32
33 #include <windows.h>
34 #include <process.h>
35 #endif
36 
37 #include <stdio.h>              /* fputs */
38 
39 #include "gslice.h"
40 
41 #include "gmain.h"
42 #include "gmem.h"               /* gslice.h */
43 #include "gstrfuncs.h"
44 #include "gutils.h"
45 #include "gtrashstack.h"
46 #include "gtestutils.h"
47 #include "gthread.h"
48 #include "gthreadprivate.h"
49 #include "glib_trace.h"
50 #include "gprintf.h"
51 
52 #include "gvalgrind.h"
53 
54 /**
55  * SECTION:memory_slices
56  * @title: Memory Slices
57  * @short_description: efficient way to allocate groups of equal-sized
58  *     chunks of memory
59  *
60  * Memory slices provide a space-efficient and multi-processing scalable
61  * way to allocate equal-sized pieces of memory, just like the original
62  * #GMemChunks (from GLib 2.8), while avoiding their excessive
63  * memory-waste, scalability and performance problems.
64  *
65  * To achieve these goals, the slice allocator uses a sophisticated,
66  * layered design that has been inspired by Bonwick's slab allocator
67  * ([Bonwick94](http://citeseer.ist.psu.edu/bonwick94slab.html)
68  * Jeff Bonwick, The slab allocator: An object-caching kernel
69  * memory allocator. USENIX 1994, and
70  * [Bonwick01](http://citeseer.ist.psu.edu/bonwick01magazines.html)
71  * Bonwick and Jonathan Adams, Magazines and vmem: Extending the
72  * slab allocator to many cpu's and arbitrary resources. USENIX 2001)
73  *
74  * It uses posix_memalign() to optimize allocations of many equally-sized
75  * chunks, and has per-thread free lists (the so-called magazine layer)
76  * to quickly satisfy allocation requests of already known structure sizes.
77  * This is accompanied by extra caching logic to keep freed memory around
78  * for some time before returning it to the system. Memory that is unused
79  * due to alignment constraints is used for cache colorization (random
80  * distribution of chunk addresses) to improve CPU cache utilization. The
81  * caching layer of the slice allocator adapts itself to high lock contention
82  * to improve scalability.
83  *
84  * The slice allocator can allocate blocks as small as two pointers, and
85  * unlike malloc(), it does not reserve extra space per block. For large block
86  * sizes, g_slice_new() and g_slice_alloc() will automatically delegate to the
87  * system malloc() implementation. For newly written code it is recommended
88  * to use the new `g_slice` API instead of g_malloc() and
89  * friends, as long as objects are not resized during their lifetime and the
90  * object size used at allocation time is still available when freeing.
91  *
92  * Here is an example of using the slice allocator:
93  * |[<!-- language="C" -->
94  * gchar *mem[10000];
95  * gint i, j;
96  *
97  * // Allocate 10000 blocks.
98  * for (i = 0; i < 10000; i++)
99  *   {
100  *     mem[i] = g_slice_alloc (50);
101  *
102  *     // Fill in the memory with some junk.
103  *     for (j = 0; j < 50; j++)
104  *       mem[i][j] = i * j;
105  *   }
106  *
107  * // Now free all of the blocks.
108  * for (i = 0; i < 10000; i++)
109  *   g_slice_free1 (50, mem[i]);
110  * ]|
111  *
112  * And here is an example of using the slice allocator
113  * with data structures:
114  * |[<!-- language="C" -->
115  * GRealArray *array;
116  *
117  * // Allocate one block, using the g_slice_new() macro.
118  * array = g_slice_new (GRealArray);
119  *
120  * // We can now use array just like a normal pointer to a structure.
121  * array->data            = NULL;
122  * array->len             = 0;
123  * array->alloc           = 0;
124  * array->zero_terminated = (zero_terminated ? 1 : 0);
125  * array->clear           = (clear ? 1 : 0);
126  * array->elt_size        = elt_size;
127  *
128  * // We can free the block, so it can be reused.
129  * g_slice_free (GRealArray, array);
130  * ]|
131  */
132 
133 /* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
134  * allocator and magazine extensions as outlined in:
135  * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
136  *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
137  * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
138  *   slab allocator to many cpu's and arbitrary resources.
139  *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
140  * the layers are:
141  * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
142  *   of recently freed and soon to be allocated chunks is maintained per thread.
143  *   this way, most alloc/free requests can be quickly satisfied from per-thread
144  *   free lists which only require one g_private_get() call to retrieve the
145  *   thread handle.
146  * - the magazine cache. allocating and freeing chunks to/from threads only
147  *   occurs at magazine sizes from a global depot of magazines. the depot
148  *   maintains a 15 second working set of allocated magazines, so full
149  *   magazines are not allocated and released too often.
150  *   the chunk size dependent magazine sizes automatically adapt (within limits,
151  *   see [3]) to lock contention to properly scale performance across a variety
152  *   of SMP systems.
153  * - the slab allocator. this allocator allocates slabs (blocks of memory) close
154  *   to the system page size or multiples thereof which have to be page aligned.
155  *   the blocks are divided into smaller chunks which are used to satisfy
156  *   allocations from the upper layers. the space provided by the remainder of
157  *   the chunk size division is used for cache colorization (random distribution
158  *   of chunk addresses) to improve processor cache utilization. multiple slabs
159  *   with the same chunk size are kept in a partially sorted ring to allow O(1)
160  *   freeing and allocation of chunks (as long as the allocation of an entirely
161  *   new slab can be avoided).
162  * - the page allocator. on most modern systems, posix_memalign(3) or
163  *   memalign(3) should be available, so this is used to allocate blocks with
164  *   system page size based alignments and sizes or multiples thereof.
165  *   if no memalign variant is provided, valloc() is used instead and
166  *   block sizes are limited to the system page size (no multiples thereof).
167  *   as a fallback, on systems without even valloc(), a malloc(3)-based page
168  *   allocator with alloc-only behaviour is used.
169  *
170  * NOTES:
171  * [1] some systems' memalign(3) implementations may rely on boundary tagging for
172  *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
173  *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
174  *     specified in NATIVE_MALLOC_PADDING.
175  * [2] using the slab allocator alone already provides for a fast and efficient
176  *     allocator, it doesn't properly scale beyond single-threaded uses though.
177  *     also, the slab allocator implements eager free(3)-ing, i.e. does not
178  *     provide any form of caching or working set maintenance. so if used alone,
179  *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
180  *     at certain thresholds.
181  * [3] magazine sizes are bound by an implementation specific minimum size and
182  *     a chunk size specific maximum to limit magazine storage sizes to roughly
183  *     16KB.
184  * [4] allocating ca. 8 chunks per block/page keeps a good balance between
185  *     external and internal fragmentation (<= 12.5%). [Bonwick94]
186  */
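/* As an illustrative sketch (not exhaustive), a single allocation that misses
 * every cache travels through the layers above roughly like this:
 *   g_slice_alloc (sz)
 *     -> thread_memory_magazine1_alloc()      (per-thread magazine)
 *     -> thread_memory_magazine1_reload()     (magazine1 empty: refill)
 *        -> magazine_cache_pop_magazine()     (global depot, magazine_mutex)
 *           -> slab_allocator_alloc_chunk()   (slab layer, slab_mutex)
 *              -> allocator_add_slab()
 *                 -> allocator_memalign()     (page allocator)
 * freeing mirrors this path via thread_memory_magazine2_free(),
 * magazine_cache_push_magazine() and slab_allocator_free_chunk().
 */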
187 
188 /* --- macros and constants --- */
189 #define LARGEALIGNMENT          (256)
190 #define P2ALIGNMENT             (2 * sizeof (gsize))                            /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
191 #define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
192 #define NATIVE_MALLOC_PADDING   P2ALIGNMENT                                     /* per-page padding left for native malloc(3) see [1] */
193 #define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
194 #define MAX_MAGAZINE_SIZE       (256)                                           /* see [3] and allocator_get_magazine_threshold() for this */
195 #define MIN_MAGAZINE_SIZE       (4)
196 #define MAX_STAMP_COUNTER       (7)                                             /* distributes the load of gettimeofday() */
197 #define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
198 #define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
199 #define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
200 #define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
201 #define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)
202 
203 /* optimized version of ALIGN (size, P2ALIGNMENT) */
204 #if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
205 #define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
206 #elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
207 #define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
208 #else
209 #define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
210 #endif
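/* worked example of the size arithmetic above, assuming a typical 64-bit
 * platform (GLIB_SIZEOF_SIZE_T == 8, sizeof (SlabInfo) == 32):
 *   P2ALIGNMENT              == 16
 *   P2ALIGN (50)             == 64     (a 50 byte request is padded to 64)
 *   SLAB_INDEX (al, 64)      == 3
 *   SLAB_CHUNK_SIZE (al, 3)  == 64
 *   SLAB_INFO_SIZE           == P2ALIGN (32 + 16) == 48
 *   SLAB_BPAGE_SIZE (al, 64) == 8 * 64 + 48 == 560
 */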
211 
212 /* special helpers to avoid gmessage.c dependency */
213 static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
214 #define mem_assert(cond)    do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)
215 
216 /* --- structures --- */
217 typedef struct _ChunkLink      ChunkLink;
218 typedef struct _SlabInfo       SlabInfo;
219 typedef struct _CachedMagazine CachedMagazine;
220 struct _ChunkLink {
221   ChunkLink *next;
222   ChunkLink *data;
223 };
224 struct _SlabInfo {
225   ChunkLink *chunks;
226   guint n_allocated;
227   SlabInfo *next, *prev;
228 };
229 typedef struct {
230   ChunkLink *chunks;
231   gsize      count;                     /* approximate chunks list length */
232 } Magazine;
233 typedef struct {
234   Magazine   *magazine1;                /* array of MAX_SLAB_INDEX (allocator) */
235   Magazine   *magazine2;                /* array of MAX_SLAB_INDEX (allocator) */
236 } ThreadMemory;
237 typedef struct {
238   gboolean always_malloc;
239   gboolean bypass_magazines;
240   gboolean debug_blocks;
241   gsize    working_set_msecs;
242   guint    color_increment;
243 } SliceConfig;
244 typedef struct {
245   /* const after initialization */
246   gsize         min_page_size, max_page_size;
247   SliceConfig   config;
248   gsize         max_slab_chunk_size_for_magazine_cache;
249   /* magazine cache */
250   GMutex        magazine_mutex;
251   ChunkLink   **magazines;                /* array of MAX_SLAB_INDEX (allocator) */
252   guint        *contention_counters;      /* array of MAX_SLAB_INDEX (allocator) */
253   gint          mutex_counter;
254   guint         stamp_counter;
255   guint         last_stamp;
256   /* slab allocator */
257   GMutex        slab_mutex;
258   SlabInfo    **slab_stack;                /* array of MAX_SLAB_INDEX (allocator) */
259   guint        color_accu;
260 } Allocator;
261 
262 /* --- g-slice prototypes --- */
263 static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
264 static void         slab_allocator_free_chunk        (gsize      chunk_size,
265                                                       gpointer   mem);
266 static void         private_thread_memory_cleanup    (gpointer   data);
267 static gpointer     allocator_memalign               (gsize      alignment,
268                                                       gsize      memsize);
269 static void         allocator_memfree                (gsize      memsize,
270                                                       gpointer   mem);
271 static inline void  magazine_cache_update_stamp      (void);
272 static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
273                                                       guint      ix);
274 
275 /* --- g-slice memory checker --- */
276 static void     smc_notify_alloc  (void   *pointer,
277                                    size_t  size);
278 static int      smc_notify_free   (void   *pointer,
279                                    size_t  size);
280 
281 /* --- variables --- */
282 static GPrivate    private_thread_memory = G_PRIVATE_INIT (private_thread_memory_cleanup);
283 static gsize       sys_page_size = 0;
284 static Allocator   allocator[1] = { { 0, }, };
285 static SliceConfig slice_config = {
286   FALSE,        /* always_malloc */
287   FALSE,        /* bypass_magazines */
288   FALSE,        /* debug_blocks */
289   15 * 1000,    /* working_set_msecs */
290   1,            /* color increment, alt: 0x7fffffff */
291 };
292 static GMutex      smc_tree_mutex; /* mutex for G_SLICE=debug-blocks */
293 
294 /* --- auxiliary functions --- */
295 void
296 g_slice_set_config (GSliceConfig ckey,
297                     gint64       value)
298 {
299   g_return_if_fail (sys_page_size == 0);
300   switch (ckey)
301     {
302     case G_SLICE_CONFIG_ALWAYS_MALLOC:
303       slice_config.always_malloc = value != 0;
304       break;
305     case G_SLICE_CONFIG_BYPASS_MAGAZINES:
306       slice_config.bypass_magazines = value != 0;
307       break;
308     case G_SLICE_CONFIG_WORKING_SET_MSECS:
309       slice_config.working_set_msecs = value;
310       break;
311     case G_SLICE_CONFIG_COLOR_INCREMENT:
312       slice_config.color_increment = value;
313     default: ;
314     }
315 }
316 
317 gint64
318 g_slice_get_config (GSliceConfig ckey)
319 {
320   switch (ckey)
321     {
322     case G_SLICE_CONFIG_ALWAYS_MALLOC:
323       return slice_config.always_malloc;
324     case G_SLICE_CONFIG_BYPASS_MAGAZINES:
325       return slice_config.bypass_magazines;
326     case G_SLICE_CONFIG_WORKING_SET_MSECS:
327       return slice_config.working_set_msecs;
328     case G_SLICE_CONFIG_CHUNK_SIZES:
329       return MAX_SLAB_INDEX (allocator);
330     case G_SLICE_CONFIG_COLOR_INCREMENT:
331       return slice_config.color_increment;
332     default:
333       return 0;
334     }
335 }
336 
337 gint64*
338 g_slice_get_config_state (GSliceConfig ckey,
339                           gint64       address,
340                           guint       *n_values)
341 {
342   guint i = 0;
343   g_return_val_if_fail (n_values != NULL, NULL);
344   *n_values = 0;
345   switch (ckey)
346     {
347       gint64 array[64];
348     case G_SLICE_CONFIG_CONTENTION_COUNTER:
349       array[i++] = SLAB_CHUNK_SIZE (allocator, address);
350       array[i++] = allocator->contention_counters[address];
351       array[i++] = allocator_get_magazine_threshold (allocator, address);
352       *n_values = i;
353       return g_memdup (array, sizeof (array[0]) * *n_values);
354     default:
355       return NULL;
356     }
357 }
358 
359 static void
360 slice_config_init (SliceConfig *config)
361 {
362   const gchar *val;
363 
364   *config = slice_config;
365 
366   val = getenv ("G_SLICE");
367   if (val != NULL)
368     {
369       gint flags;
370       const GDebugKey keys[] = {
371         { "always-malloc", 1 << 0 },
372         { "debug-blocks",  1 << 1 },
373       };
374 
375       flags = g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
376       if (flags & (1 << 0))
377         config->always_malloc = TRUE;
378       if (flags & (1 << 1))
379         config->debug_blocks = TRUE;
380     }
381   else
382     {
383       /* G_SLICE was not specified, so check if valgrind is running and
384        * disable ourselves if it is.
385        *
386        * This way it's possible to force gslice to be enabled under
387        * valgrind just by setting G_SLICE to the empty string.
388        */
389 #ifdef ENABLE_VALGRIND
390       if (RUNNING_ON_VALGRIND)
391         config->always_malloc = TRUE;
392 #endif
393     }
394 }
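/* usage sketch for the debug keys parsed above (myprog stands in for any
 * GLib based program):
 *   G_SLICE=always-malloc ./myprog
 *   G_SLICE=always-malloc,debug-blocks ./myprog
 */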
395 
396 static void
397 g_slice_init_nomessage (void)
398 {
399   /* we may not use g_error() or friends here */
400   mem_assert (sys_page_size == 0);
401   mem_assert (MIN_MAGAZINE_SIZE >= 4);
402 
403 #ifdef G_OS_WIN32
404   {
405     SYSTEM_INFO system_info;
406     GetSystemInfo (&system_info);
407     sys_page_size = system_info.dwPageSize;
408   }
409 #else
410   sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
411 #endif
412   mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
413   mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
414   slice_config_init (&allocator->config);
415   allocator->min_page_size = sys_page_size;
416 #if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN
417   /* allow allocation of pages up to 8KB (with 8KB alignment).
418    * this is useful because many medium to large sized structures
419    * fit less than 8 times (see [4]) into 4KB pages.
420    * we allow very small page sizes here, to reduce wastage in
421    * threads if only small allocations are required (this does
422    * bear the risk of increasing allocation times and fragmentation
423    * though).
424    */
425   allocator->min_page_size = MAX (allocator->min_page_size, 4096);
426   allocator->max_page_size = MAX (allocator->min_page_size, 8192);
427   allocator->min_page_size = MIN (allocator->min_page_size, 128);
428 #else
429   /* we can only align to system page size */
430   allocator->max_page_size = sys_page_size;
431 #endif
432   if (allocator->config.always_malloc)
433     {
434       allocator->contention_counters = NULL;
435       allocator->magazines = NULL;
436       allocator->slab_stack = NULL;
437     }
438   else
439     {
440       allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
441       allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
442       allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
443     }
444 
445   allocator->mutex_counter = 0;
446   allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
447   allocator->last_stamp = 0;
448   allocator->color_accu = 0;
449   magazine_cache_update_stamp();
450   /* values cached for performance reasons */
451   allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
452   if (allocator->config.always_malloc || allocator->config.bypass_magazines)
453     allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
454 }
455 
456 static inline guint
457 allocator_categorize (gsize aligned_chunk_size)
458 {
459   /* speed up the likely path */
460   if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
461     return 1;           /* use magazine cache */
462 
463   if (!allocator->config.always_malloc &&
464       aligned_chunk_size &&
465       aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
466     {
467       if (allocator->config.bypass_magazines)
468         return 2;       /* use slab allocator, see [2] */
469       return 1;         /* use magazine cache */
470     }
471   return 0;             /* use malloc() */
472 }
473 
474 static inline void
475 g_mutex_lock_a (GMutex *mutex,
476                 guint  *contention_counter)
477 {
478   gboolean contention = FALSE;
479   if (!g_mutex_trylock (mutex))
480     {
481       g_mutex_lock (mutex);
482       contention = TRUE;
483     }
484   if (contention)
485     {
486       allocator->mutex_counter++;
487       if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
488         {
489           allocator->mutex_counter = 0;
490           *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
491         }
492     }
493   else /* !contention */
494     {
495       allocator->mutex_counter--;
496       if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
497         {
498           allocator->mutex_counter = 0;
499           *contention_counter = MAX (*contention_counter, 1) - 1;
500         }
501     }
502 }
503 
504 static inline ThreadMemory*
505 thread_memory_from_self (void)
506 {
507   ThreadMemory *tmem = g_private_get (&private_thread_memory);
508   if (G_UNLIKELY (!tmem))
509     {
510       static GMutex init_mutex;
511       guint n_magazines;
512 
513       g_mutex_lock (&init_mutex);
514       if G_UNLIKELY (sys_page_size == 0)
515         g_slice_init_nomessage ();
516       g_mutex_unlock (&init_mutex);
517 
518       n_magazines = MAX_SLAB_INDEX (allocator);
519       tmem = g_private_set_alloc0 (&private_thread_memory, sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
520       tmem->magazine1 = (Magazine*) (tmem + 1);
521       tmem->magazine2 = &tmem->magazine1[n_magazines];
522     }
523   return tmem;
524 }
525 
526 static inline ChunkLink*
527 magazine_chain_pop_head (ChunkLink **magazine_chunks)
528 {
529   /* magazine chains are linked via ChunkLink->next.
530    * each ChunkLink->data of the toplevel chain may point to a subchain,
531    * linked via ChunkLink->next. ChunkLink->data of the subchains just
532    * contains uninitialized junk.
533    */
534   ChunkLink *chunk = (*magazine_chunks)->data;
535   if (G_UNLIKELY (chunk))
536     {
537       /* allocating from freed list */
538       (*magazine_chunks)->data = chunk->next;
539     }
540   else
541     {
542       chunk = *magazine_chunks;
543       *magazine_chunks = chunk->next;
544     }
545   return chunk;
546 }
547 
548 #if 0 /* useful for debugging */
549 static guint
550 magazine_count (ChunkLink *head)
551 {
552   guint count = 0;
553   if (!head)
554     return 0;
555   while (head)
556     {
557       ChunkLink *child = head->data;
558       count += 1;
559       for (child = head->data; child; child = child->next)
560         count += 1;
561       head = head->next;
562     }
563   return count;
564 }
565 #endif
566 
567 static inline gsize
568 allocator_get_magazine_threshold (Allocator *allocator,
569                                   guint      ix)
570 {
571   /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
572    * which is required by the implementation. also, for moderately sized chunks
573  * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
574    * of chunks available per page/2 to avoid excessive traffic in the magazine
575    * cache for small to medium sized structures.
576    * the upper bound of the magazine size is effectively provided by
577    * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
578    * the content of a single magazine doesn't exceed ca. 16KB.
579    */
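  /* worked example of the threshold computed below, assuming a memalign
   * build (max_page_size == 8192):
   *   chunk_size ==   64: MAX (4, 8192 / 320)  == 25 chunks per magazine
   *   chunk_size == 1024: MAX (4, 8192 / 5120) == 4  chunks per magazine
   */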
580   gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
581   guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
582   guint contention_counter = allocator->contention_counters[ix];
583   if (G_UNLIKELY (contention_counter))  /* single CPU bias */
584     {
585       /* adapt contention counter thresholds to chunk sizes */
586       contention_counter = contention_counter * 64 / chunk_size;
587       threshold = MAX (threshold, contention_counter);
588     }
589   return threshold;
590 }
591 
592 /* --- magazine cache --- */
593 static inline void
594 magazine_cache_update_stamp (void)
595 {
596   if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
597     {
598       gint64 now_us = g_get_real_time ();
599       allocator->last_stamp = now_us / 1000; /* milliseconds */
600       allocator->stamp_counter = 0;
601     }
602   else
603     allocator->stamp_counter++;
604 }
605 
606 static inline ChunkLink*
607 magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
608 {
609   ChunkLink *chunk1;
610   ChunkLink *chunk2;
611   ChunkLink *chunk3;
612   ChunkLink *chunk4;
613   /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
614   /* ensure a magazine with at least 4 unused data pointers */
615   chunk1 = magazine_chain_pop_head (&magazine_chunks);
616   chunk2 = magazine_chain_pop_head (&magazine_chunks);
617   chunk3 = magazine_chain_pop_head (&magazine_chunks);
618   chunk4 = magazine_chain_pop_head (&magazine_chunks);
619   chunk4->next = magazine_chunks;
620   chunk3->next = chunk4;
621   chunk2->next = chunk3;
622   chunk1->next = chunk2;
623   return chunk1;
624 }
625 
626 /* access the first 3 fields of a specially prepared magazine chain */
627 #define magazine_chain_prev(mc)         ((mc)->data)
628 #define magazine_chain_stamp(mc)        ((mc)->next->data)
629 #define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
630 #define magazine_chain_next(mc)         ((mc)->next->next->data)
631 #define magazine_chain_count(mc)        ((mc)->next->next->next->data)
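/* i.e. after magazine_chain_prepare_fields(), the data fields of the first
 * four toplevel links carry the magazine bookkeeping:
 *   chunk1->data: prev pointer of the magazine ring
 *   chunk2->data: time stamp
 *   chunk3->data: next pointer of the magazine ring
 *   chunk4->data: chunk count of this magazine
 */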
632 
633 static void
634 magazine_cache_trim (Allocator *allocator,
635                      guint      ix,
636                      guint      stamp)
637 {
638   /* g_mutex_lock (allocator->mutex); done by caller */
639   /* trim magazine cache from tail */
640   ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
641   ChunkLink *trash = NULL;
642   while (!G_APPROX_VALUE(stamp, magazine_chain_uint_stamp (current),
643                          allocator->config.working_set_msecs))
644     {
645       /* unlink */
646       ChunkLink *prev = magazine_chain_prev (current);
647       ChunkLink *next = magazine_chain_next (current);
648       magazine_chain_next (prev) = next;
649       magazine_chain_prev (next) = prev;
650       /* clear special fields, put on trash stack */
651       magazine_chain_next (current) = NULL;
652       magazine_chain_count (current) = NULL;
653       magazine_chain_stamp (current) = NULL;
654       magazine_chain_prev (current) = trash;
655       trash = current;
656       /* fixup list head if required */
657       if (current == allocator->magazines[ix])
658         {
659           allocator->magazines[ix] = NULL;
660           break;
661         }
662       current = prev;
663     }
664   g_mutex_unlock (&allocator->magazine_mutex);
665   /* free trash */
666   if (trash)
667     {
668       const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
669       g_mutex_lock (&allocator->slab_mutex);
670       while (trash)
671         {
672           current = trash;
673           trash = magazine_chain_prev (current);
674           magazine_chain_prev (current) = NULL; /* clear special field */
675           while (current)
676             {
677               ChunkLink *chunk = magazine_chain_pop_head (&current);
678               slab_allocator_free_chunk (chunk_size, chunk);
679             }
680         }
681       g_mutex_unlock (&allocator->slab_mutex);
682     }
683 }
684 
685 static void
686 magazine_cache_push_magazine (guint      ix,
687                               ChunkLink *magazine_chunks,
688                               gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
689 {
690   ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
691   ChunkLink *next, *prev;
692   g_mutex_lock (&allocator->magazine_mutex);
693   /* add magazine at head */
694   next = allocator->magazines[ix];
695   if (next)
696     prev = magazine_chain_prev (next);
697   else
698     next = prev = current;
699   magazine_chain_next (prev) = current;
700   magazine_chain_prev (next) = current;
701   magazine_chain_prev (current) = prev;
702   magazine_chain_next (current) = next;
703   magazine_chain_count (current) = (gpointer) count;
704   /* stamp magazine */
705   magazine_cache_update_stamp();
706   magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
707   allocator->magazines[ix] = current;
708   /* free old magazines beyond a certain threshold */
709   magazine_cache_trim (allocator, ix, allocator->last_stamp);
710   /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
711 }
712 
713 static ChunkLink*
714 magazine_cache_pop_magazine (guint  ix,
715                              gsize *countp)
716 {
717   g_mutex_lock_a (&allocator->magazine_mutex, &allocator->contention_counters[ix]);
718   if (!allocator->magazines[ix])
719     {
720       guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
721       gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
722       ChunkLink *chunk, *head;
723       g_mutex_unlock (&allocator->magazine_mutex);
724       g_mutex_lock (&allocator->slab_mutex);
725       head = slab_allocator_alloc_chunk (chunk_size);
726       head->data = NULL;
727       chunk = head;
728       for (i = 1; i < magazine_threshold; i++)
729         {
730           chunk->next = slab_allocator_alloc_chunk (chunk_size);
731           chunk = chunk->next;
732           chunk->data = NULL;
733         }
734       chunk->next = NULL;
735       g_mutex_unlock (&allocator->slab_mutex);
736       *countp = i;
737       return head;
738     }
739   else
740     {
741       ChunkLink *current = allocator->magazines[ix];
742       ChunkLink *prev = magazine_chain_prev (current);
743       ChunkLink *next = magazine_chain_next (current);
744       /* unlink */
745       magazine_chain_next (prev) = next;
746       magazine_chain_prev (next) = prev;
747       allocator->magazines[ix] = next == current ? NULL : next;
748       g_mutex_unlock (&allocator->magazine_mutex);
749       /* clear special fields and hand out */
750       *countp = (gsize) magazine_chain_count (current);
751       magazine_chain_prev (current) = NULL;
752       magazine_chain_next (current) = NULL;
753       magazine_chain_count (current) = NULL;
754       magazine_chain_stamp (current) = NULL;
755       return current;
756     }
757 }
758 
759 /* --- thread magazines --- */
760 static void
761 private_thread_memory_cleanup (gpointer data)
762 {
763   ThreadMemory *tmem = data;
764   const guint n_magazines = MAX_SLAB_INDEX (allocator);
765   guint ix;
766   for (ix = 0; ix < n_magazines; ix++)
767     {
768       Magazine *mags[2];
769       guint j;
770       mags[0] = &tmem->magazine1[ix];
771       mags[1] = &tmem->magazine2[ix];
772       for (j = 0; j < 2; j++)
773         {
774           Magazine *mag = mags[j];
775           if (mag->count >= MIN_MAGAZINE_SIZE)
776             magazine_cache_push_magazine (ix, mag->chunks, mag->count);
777           else
778             {
779               const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
780               g_mutex_lock (&allocator->slab_mutex);
781               while (mag->chunks)
782                 {
783                   ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
784                   slab_allocator_free_chunk (chunk_size, chunk);
785                 }
786               g_mutex_unlock (&allocator->slab_mutex);
787             }
788         }
789     }
790   g_free (tmem);
791 }
792 
793 static void
794 thread_memory_magazine1_reload (ThreadMemory *tmem,
795                                 guint         ix)
796 {
797   Magazine *mag = &tmem->magazine1[ix];
798   mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
799   mag->count = 0;
800   mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
801 }
802 
803 static void
804 thread_memory_magazine2_unload (ThreadMemory *tmem,
805                                 guint         ix)
806 {
807   Magazine *mag = &tmem->magazine2[ix];
808   magazine_cache_push_magazine (ix, mag->chunks, mag->count);
809   mag->chunks = NULL;
810   mag->count = 0;
811 }
812 
813 static inline void
814 thread_memory_swap_magazines (ThreadMemory *tmem,
815                               guint         ix)
816 {
817   Magazine xmag = tmem->magazine1[ix];
818   tmem->magazine1[ix] = tmem->magazine2[ix];
819   tmem->magazine2[ix] = xmag;
820 }
821 
822 static inline gboolean
823 thread_memory_magazine1_is_empty (ThreadMemory *tmem,
824                                   guint         ix)
825 {
826   return tmem->magazine1[ix].chunks == NULL;
827 }
828 
829 static inline gboolean
830 thread_memory_magazine2_is_full (ThreadMemory *tmem,
831                                  guint         ix)
832 {
833   return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
834 }
835 
836 static inline gpointer
837 thread_memory_magazine1_alloc (ThreadMemory *tmem,
838                                guint         ix)
839 {
840   Magazine *mag = &tmem->magazine1[ix];
841   ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
842   if (G_LIKELY (mag->count > 0))
843     mag->count--;
844   return chunk;
845 }
846 
847 static inline void
848 thread_memory_magazine2_free (ThreadMemory *tmem,
849                               guint         ix,
850                               gpointer      mem)
851 {
852   Magazine *mag = &tmem->magazine2[ix];
853   ChunkLink *chunk = mem;
854   chunk->data = NULL;
855   chunk->next = mag->chunks;
856   mag->chunks = chunk;
857   mag->count++;
858 }
859 
860 /* --- API functions --- */
861 
862 /**
863  * g_slice_new:
864  * @type: the type to allocate, typically a structure name
865  *
866  * A convenience macro to allocate a block of memory from the
867  * slice allocator.
868  *
869  * It calls g_slice_alloc() with `sizeof (@type)` and casts the
870  * returned pointer to a pointer of the given type, avoiding a type
871  * cast in the source code. Note that the underlying slice allocation
872  * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
873  * environment variable.
874  *
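 * An illustrative sketch (`MyPoint` is a hypothetical example type):
 * |[<!-- language="C" -->
 * typedef struct { gint x, y; } MyPoint;
 *
 * MyPoint *point = g_slice_new (MyPoint);
 * point->x = 1;
 * point->y = 2;
 * // ... use point ...
 * g_slice_free (MyPoint, point);
 * ]|
 *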
875  * This can never return %NULL as the minimum allocation size from
876  * `sizeof (@type)` is 1 byte.
877  *
878  * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
879  *    to @type
880  *
881  * Since: 2.10
882  */
883 
884 /**
885  * g_slice_new0:
886  * @type: the type to allocate, typically a structure name
887  *
888  * A convenience macro to allocate a block of memory from the
889  * slice allocator and set the memory to 0.
890  *
891  * It calls g_slice_alloc0() with `sizeof (@type)`
892  * and casts the returned pointer to a pointer of the given type,
893  * avoiding a type cast in the source code.
894  * Note that the underlying slice allocation mechanism can
895  * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
896  * environment variable.
897  *
898  * This can never return %NULL as the minimum allocation size from
899  * `sizeof (@type)` is 1 byte.
900  *
901  * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
902  *    to @type
903  *
904  * Since: 2.10
905  */
906 
907 /**
908  * g_slice_dup:
909  * @type: the type to duplicate, typically a structure name
910  * @mem: (not nullable): the memory to copy into the allocated block
911  *
912  * A convenience macro to duplicate a block of memory using
913  * the slice allocator.
914  *
915  * It calls g_slice_copy() with `sizeof (@type)`
916  * and casts the returned pointer to a pointer of the given type,
917  * avoiding a type cast in the source code.
918  * Note that the underlying slice allocation mechanism can
919  * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
920  * environment variable.
921  *
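 * An illustrative sketch (`MyPoint` again being a hypothetical example type):
 * |[<!-- language="C" -->
 * MyPoint tmpl = { 1, 2 };
 *
 * MyPoint *copy = g_slice_dup (MyPoint, &tmpl);
 * // ... use copy ...
 * g_slice_free (MyPoint, copy);
 * ]|
 *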
922  * This can never return %NULL.
923  *
924  * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
925  *    to @type
926  *
927  * Since: 2.14
928  */
929 
930 /**
931  * g_slice_free:
932  * @type: the type of the block to free, typically a structure name
933  * @mem: a pointer to the block to free
934  *
935  * A convenience macro to free a block of memory that has
936  * been allocated from the slice allocator.
937  *
938  * It calls g_slice_free1() using `sizeof (type)`
939  * as the block size.
940  * Note that the exact release behaviour can be changed with the
941  * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
942  * [`G_SLICE`][G_SLICE] for related debugging options.
943  *
944  * If @mem is %NULL, this macro does nothing.
945  *
946  * Since: 2.10
947  */
948 
949 /**
950  * g_slice_free_chain:
951  * @type: the type of the @mem_chain blocks
952  * @mem_chain: a pointer to the first block of the chain
953  * @next: the field name of the next pointer in @type
954  *
955  * Frees a linked list of memory blocks of structure type @type.
956  * The memory blocks must be equal-sized, allocated via
957  * g_slice_alloc() or g_slice_alloc0() and linked together by
958  * a @next pointer (similar to #GSList). The name of the
959  * @next field in @type is passed as third argument.
960  * Note that the exact release behaviour can be changed with the
961  * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
962  * [`G_SLICE`][G_SLICE] for related debugging options.
963  *
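 * An illustrative sketch with a hypothetical list node type:
 * |[<!-- language="C" -->
 * typedef struct _Node Node;
 * struct _Node { Node *next; gint value; };
 *
 * Node *list = g_slice_new (Node);
 * list->next = g_slice_new (Node);
 * list->next->next = NULL;
 *
 * // frees both nodes by following the next field
 * g_slice_free_chain (Node, list, next);
 * ]|
 *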
964  * If @mem_chain is %NULL, this function does nothing.
965  *
966  * Since: 2.10
967  */
968 
969 /**
970  * g_slice_alloc:
971  * @block_size: the number of bytes to allocate
972  *
973  * Allocates a block of memory from the slice allocator.
974  * The block address handed out can be expected to be aligned
975  * to at least 1 * sizeof (void*),
976  * though in general slices are 2 * sizeof (void*) bytes aligned;
977  * if a malloc() fallback implementation is used instead,
978  * the alignment may be reduced in a libc-dependent fashion.
979  * Note that the underlying slice allocation mechanism can
980  * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
981  * environment variable.
982  *
983  * Returns: a pointer to the allocated memory block, which will be %NULL if and
984  *    only if @block_size is 0
985  *
986  * Since: 2.10
987  */
988 gpointer
989 g_slice_alloc (gsize mem_size)
990 {
991   ThreadMemory *tmem;
992   gsize chunk_size;
993   gpointer mem;
994   guint acat;
995 
996   /* This gets the private structure for this thread.  If the private
997    * structure does not yet exist, it is created.
998    *
999    * This has a side effect of causing GSlice to be initialised, so it
1000    * must come first.
1001    */
1002   tmem = thread_memory_from_self ();
1003 
1004   chunk_size = P2ALIGN (mem_size);
1005   acat = allocator_categorize (chunk_size);
1006   if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
1007     {
1008       guint ix = SLAB_INDEX (allocator, chunk_size);
1009       if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
1010         {
1011           thread_memory_swap_magazines (tmem, ix);
1012           if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
1013             thread_memory_magazine1_reload (tmem, ix);
1014         }
1015       mem = thread_memory_magazine1_alloc (tmem, ix);
1016     }
1017   else if (acat == 2)           /* allocate through slab allocator */
1018     {
1019       g_mutex_lock (&allocator->slab_mutex);
1020       mem = slab_allocator_alloc_chunk (chunk_size);
1021       g_mutex_unlock (&allocator->slab_mutex);
1022     }
1023   else                          /* delegate to system malloc */
1024     mem = g_malloc (mem_size);
1025   if (G_UNLIKELY (allocator->config.debug_blocks))
1026     smc_notify_alloc (mem, mem_size);
1027 
1028   TRACE (GLIB_SLICE_ALLOC((void*)mem, mem_size));
1029 
1030   return mem;
1031 }
1032 
1033 /**
1034  * g_slice_alloc0:
1035  * @block_size: the number of bytes to allocate
1036  *
1037  * Allocates a block of memory via g_slice_alloc() and initializes
1038  * the returned memory to 0. Note that the underlying slice allocation
1039  * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
1040  * environment variable.
1041  *
1042  * Returns: a pointer to the allocated block, which will be %NULL if and only
1043  *    if @block_size is 0
1044  *
1045  * Since: 2.10
1046  */
1047 gpointer
1048 g_slice_alloc0 (gsize mem_size)
1049 {
1050   gpointer mem = g_slice_alloc (mem_size);
1051   if (mem)
1052     memset (mem, 0, mem_size);
1053   return mem;
1054 }
1055 
1056 /**
1057  * g_slice_copy:
1058  * @block_size: the number of bytes to allocate
1059  * @mem_block: the memory to copy
1060  *
1061  * Allocates a block of memory from the slice allocator
1062  * and copies @block_size bytes into it from @mem_block.
1063  *
1064  * @mem_block must be non-%NULL if @block_size is non-zero.
1065  *
1066  * Returns: a pointer to the allocated memory block, which will be %NULL if and
1067  *    only if @block_size is 0
1068  *
1069  * Since: 2.14
1070  */
1071 gpointer
1072 g_slice_copy (gsize         mem_size,
1073               gconstpointer mem_block)
1074 {
1075   gpointer mem = g_slice_alloc (mem_size);
1076   if (mem)
1077     memcpy (mem, mem_block, mem_size);
1078   return mem;
1079 }
1080 
1081 /**
1082  * g_slice_free1:
1083  * @block_size: the size of the block
1084  * @mem_block: a pointer to the block to free
1085  *
1086  * Frees a block of memory.
1087  *
1088  * The memory must have been allocated via g_slice_alloc() or
1089  * g_slice_alloc0() and the @block_size has to match the size
1090  * specified upon allocation. Note that the exact release behaviour
1091  * can be changed with the [`G_DEBUG=gc-friendly`][G_DEBUG] environment
1092  * variable, also see [`G_SLICE`][G_SLICE] for related debugging options.
1093  *
1094  * If @mem_block is %NULL, this function does nothing.
1095  *
1096  * Since: 2.10
1097  */
1098 void
1099 g_slice_free1 (gsize    mem_size,
1100                gpointer mem_block)
1101 {
1102   gsize chunk_size = P2ALIGN (mem_size);
1103   guint acat = allocator_categorize (chunk_size);
1104   if (G_UNLIKELY (!mem_block))
1105     return;
1106   if (G_UNLIKELY (allocator->config.debug_blocks) &&
1107       !smc_notify_free (mem_block, mem_size))
1108     abort();
1109   if (G_LIKELY (acat == 1))             /* allocate through magazine layer */
1110     {
1111       ThreadMemory *tmem = thread_memory_from_self();
1112       guint ix = SLAB_INDEX (allocator, chunk_size);
1113       if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
1114         {
1115           thread_memory_swap_magazines (tmem, ix);
1116           if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
1117             thread_memory_magazine2_unload (tmem, ix);
1118         }
1119       if (G_UNLIKELY (g_mem_gc_friendly))
1120         memset (mem_block, 0, chunk_size);
1121       thread_memory_magazine2_free (tmem, ix, mem_block);
1122     }
1123   else if (acat == 2)                   /* allocate through slab allocator */
1124     {
1125       if (G_UNLIKELY (g_mem_gc_friendly))
1126         memset (mem_block, 0, chunk_size);
1127       g_mutex_lock (&allocator->slab_mutex);
1128       slab_allocator_free_chunk (chunk_size, mem_block);
1129       g_mutex_unlock (&allocator->slab_mutex);
1130     }
1131   else                                  /* delegate to system malloc */
1132     {
1133       if (G_UNLIKELY (g_mem_gc_friendly))
1134         memset (mem_block, 0, mem_size);
1135       g_free (mem_block);
1136     }
1137   TRACE (GLIB_SLICE_FREE((void*)mem_block, mem_size));
1138 }
1139 
1140 /**
1141  * g_slice_free_chain_with_offset:
1142  * @block_size: the size of the blocks
1143  * @mem_chain:  a pointer to the first block of the chain
1144  * @next_offset: the offset of the @next field in the blocks
1145  *
1146  * Frees a linked list of memory blocks, each of size @block_size.
1147  *
1148  * The memory blocks must be equal-sized, allocated via
1149  * g_slice_alloc() or g_slice_alloc0() and linked together by a
1150  * @next pointer (similar to #GSList). The offset of the @next
1151  * field in each block is passed as third argument.
1152  * Note that the exact release behaviour can be changed with the
1153  * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
1154  * [`G_SLICE`][G_SLICE] for related debugging options.
1155  *
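 * An illustrative sketch, reusing a hypothetical node type and computing the
 * offset of its next field with G_STRUCT_OFFSET() (node_list stands for a
 * previously allocated chain):
 * |[<!-- language="C" -->
 * typedef struct _Node Node;
 * struct _Node { Node *next; gint value; };
 *
 * g_slice_free_chain_with_offset (sizeof (Node), node_list,
 *                                 G_STRUCT_OFFSET (Node, next));
 * ]|
 *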
1156  * If @mem_chain is %NULL, this function does nothing.
1157  *
1158  * Since: 2.10
1159  */
1160 void
1161 g_slice_free_chain_with_offset (gsize    mem_size,
1162                                 gpointer mem_chain,
1163                                 gsize    next_offset)
1164 {
1165   gpointer slice = mem_chain;
1166   /* while the thread magazines and the magazine cache are implemented so that
1167    * they can easily be extended to allow for free lists containing more free
1168    * lists for the first level nodes, which would allow O(1) freeing in this
1169    * function, the benefit of such an extension is questionable, because:
1170    * - the magazine size counts will become mere lower bounds which confuses
1171    *   the code adapting to lock contention;
1172    * - freeing a single node to the thread magazines is very fast, so this
1173    *   O(list_length) operation is multiplied by a fairly small factor;
1174    * - memory usage histograms on larger applications seem to indicate that
1175    *   the amount of released multi node lists is negligible in comparison
1176    *   to single node releases.
1177   * - the major performance bottleneck, namely g_private_get() or
1178    *   g_mutex_lock()/g_mutex_unlock() has already been moved out of the
1179    *   inner loop for freeing chained slices.
1180    */
1181   gsize chunk_size = P2ALIGN (mem_size);
1182   guint acat = allocator_categorize (chunk_size);
1183   if (G_LIKELY (acat == 1))             /* allocate through magazine layer */
1184     {
1185       ThreadMemory *tmem = thread_memory_from_self();
1186       guint ix = SLAB_INDEX (allocator, chunk_size);
1187       while (slice)
1188         {
1189           guint8 *current = slice;
1190           slice = *(gpointer*) (current + next_offset);
1191           if (G_UNLIKELY (allocator->config.debug_blocks) &&
1192               !smc_notify_free (current, mem_size))
1193             abort();
1194           if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
1195             {
1196               thread_memory_swap_magazines (tmem, ix);
1197               if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
1198                 thread_memory_magazine2_unload (tmem, ix);
1199             }
1200           if (G_UNLIKELY (g_mem_gc_friendly))
1201             memset (current, 0, chunk_size);
1202           thread_memory_magazine2_free (tmem, ix, current);
1203         }
1204     }
1205   else if (acat == 2)                   /* allocate through slab allocator */
1206     {
1207       g_mutex_lock (&allocator->slab_mutex);
1208       while (slice)
1209         {
1210           guint8 *current = slice;
1211           slice = *(gpointer*) (current + next_offset);
1212           if (G_UNLIKELY (allocator->config.debug_blocks) &&
1213               !smc_notify_free (current, mem_size))
1214             abort();
1215           if (G_UNLIKELY (g_mem_gc_friendly))
1216             memset (current, 0, chunk_size);
1217           slab_allocator_free_chunk (chunk_size, current);
1218         }
1219       g_mutex_unlock (&allocator->slab_mutex);
1220     }
1221   else                                  /* delegate to system malloc */
1222     while (slice)
1223       {
1224         guint8 *current = slice;
1225         slice = *(gpointer*) (current + next_offset);
1226         if (G_UNLIKELY (allocator->config.debug_blocks) &&
1227             !smc_notify_free (current, mem_size))
1228           abort();
1229         if (G_UNLIKELY (g_mem_gc_friendly))
1230           memset (current, 0, mem_size);
1231         g_free (current);
1232       }
1233 }
1234 
1235 /* --- single page allocator --- */
1236 static void
1237 allocator_slab_stack_push (Allocator *allocator,
1238                            guint      ix,
1239                            SlabInfo  *sinfo)
1240 {
1241   /* insert slab at slab ring head */
1242   if (!allocator->slab_stack[ix])
1243     {
1244       sinfo->next = sinfo;
1245       sinfo->prev = sinfo;
1246     }
1247   else
1248     {
1249       SlabInfo *next = allocator->slab_stack[ix], *prev = next->prev;
1250       next->prev = sinfo;
1251       prev->next = sinfo;
1252       sinfo->next = next;
1253       sinfo->prev = prev;
1254     }
1255   allocator->slab_stack[ix] = sinfo;
1256 }
1257 
1258 static gsize
1259 allocator_aligned_page_size (Allocator *allocator,
1260                              gsize      n_bytes)
1261 {
1262   gsize val = 1 << g_bit_storage (n_bytes - 1);
1263   val = MAX (val, allocator->min_page_size);
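  /* e.g. (assuming min_page_size == 4096): n_bytes == 560 yields
   * 1 << g_bit_storage (559) == 1024, which is then raised to 4096 */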
1264   return val;
1265 }
1266 
1267 static void
1268 allocator_add_slab (Allocator *allocator,
1269                     guint      ix,
1270                     gsize      chunk_size)
1271 {
1272   ChunkLink *chunk;
1273   SlabInfo *sinfo;
1274   gsize addr, padding, n_chunks, color = 0;
1275   gsize page_size;
1276   int errsv;
1277   gpointer aligned_memory;
1278   guint8 *mem;
1279   guint i;
1280 
1281   page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
1282   /* allocate 1 page for the chunks and the slab */
1283   aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
1284   errsv = errno;
1285   mem = aligned_memory;
1286 
1287   if (!mem)
1288     {
1289       const gchar *syserr = strerror (errsv);
1290       mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
1291                  (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
1292     }
1293   /* mask page address */
1294   addr = ((gsize) mem / page_size) * page_size;
1295   /* assert alignment */
1296   mem_assert (aligned_memory == (gpointer) addr);
1297   /* basic slab info setup */
1298   sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
1299   sinfo->n_allocated = 0;
1300   sinfo->chunks = NULL;
1301   /* figure cache colorization */
1302   n_chunks = ((guint8*) sinfo - mem) / chunk_size;
1303   padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
1304   if (padding)
1305     {
1306       color = (allocator->color_accu * P2ALIGNMENT) % padding;
1307       allocator->color_accu += allocator->config.color_increment;
1308     }
1309   /* add chunks to free list */
1310   chunk = (ChunkLink*) (mem + color);
1311   sinfo->chunks = chunk;
1312   for (i = 0; i < n_chunks - 1; i++)
1313     {
1314       chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
1315       chunk = chunk->next;
1316     }
1317   chunk->next = NULL;   /* last chunk */
1318   /* add slab to slab ring */
1319   allocator_slab_stack_push (allocator, ix, sinfo);
1320 }
1321 
1322 static gpointer
1323 slab_allocator_alloc_chunk (gsize chunk_size)
1324 {
1325   ChunkLink *chunk;
1326   guint ix = SLAB_INDEX (allocator, chunk_size);
1327   /* ensure non-empty slab */
1328   if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
1329     allocator_add_slab (allocator, ix, chunk_size);
1330   /* allocate chunk */
1331   chunk = allocator->slab_stack[ix]->chunks;
1332   allocator->slab_stack[ix]->chunks = chunk->next;
1333   allocator->slab_stack[ix]->n_allocated++;
1334   /* rotate empty slabs */
1335   if (!allocator->slab_stack[ix]->chunks)
1336     allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
1337   return chunk;
1338 }
1339 
1340 static void
1341 slab_allocator_free_chunk (gsize    chunk_size,
1342                            gpointer mem)
1343 {
1344   ChunkLink *chunk;
1345   gboolean was_empty;
1346   guint ix = SLAB_INDEX (allocator, chunk_size);
1347   gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
1348   gsize addr = ((gsize) mem / page_size) * page_size;
1349   /* mask page address */
1350   guint8 *page = (guint8*) addr;
1351   SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
1352   /* assert valid chunk count */
1353   mem_assert (sinfo->n_allocated > 0);
1354   /* add chunk to free list */
1355   was_empty = sinfo->chunks == NULL;
1356   chunk = (ChunkLink*) mem;
1357   chunk->next = sinfo->chunks;
1358   sinfo->chunks = chunk;
1359   sinfo->n_allocated--;
1360   /* keep slab ring partially sorted, empty slabs at end */
1361   if (was_empty)
1362     {
1363       /* unlink slab */
1364       SlabInfo *next = sinfo->next, *prev = sinfo->prev;
1365       next->prev = prev;
1366       prev->next = next;
1367       if (allocator->slab_stack[ix] == sinfo)
1368         allocator->slab_stack[ix] = next == sinfo ? NULL : next;
1369       /* insert slab at head */
1370       allocator_slab_stack_push (allocator, ix, sinfo);
1371     }
1372   /* eagerly free complete unused slabs */
1373   if (!sinfo->n_allocated)
1374     {
1375       /* unlink slab */
1376       SlabInfo *next = sinfo->next, *prev = sinfo->prev;
1377       next->prev = prev;
1378       prev->next = next;
1379       if (allocator->slab_stack[ix] == sinfo)
1380         allocator->slab_stack[ix] = next == sinfo ? NULL : next;
1381       /* free slab */
1382       allocator_memfree (page_size, page);
1383     }
1384 }
1385 
1386 /* --- memalign implementation --- */
1387 #ifdef HAVE_MALLOC_H
1388 #include <malloc.h>             /* memalign() */
1389 #endif
1390 
1391 /* from config.h:
1392  * define HAVE_POSIX_MEMALIGN           1 // if free(posix_memalign(3)) works, <stdlib.h>
1393  * define HAVE_MEMALIGN                 1 // if free(memalign(3)) works, <malloc.h>
1394  * define HAVE_VALLOC                   1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
1395  * if none is provided, we implement malloc(3)-based alloc-only page alignment
1396  */
1397 
1398 #if !(HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
1399 G_GNUC_BEGIN_IGNORE_DEPRECATIONS
1400 static GTrashStack *compat_valloc_trash = NULL;
1401 G_GNUC_END_IGNORE_DEPRECATIONS
1402 #endif
1403 
1404 static gpointer
1405 allocator_memalign (gsize alignment,
1406                     gsize memsize)
1407 {
1408   gpointer aligned_memory = NULL;
1409   gint err = ENOMEM;
1410 #if     HAVE_POSIX_MEMALIGN
1411   err = posix_memalign (&aligned_memory, alignment, memsize);
1412 #elif   HAVE_MEMALIGN
1413   errno = 0;
1414   aligned_memory = memalign (alignment, memsize);
1415   err = errno;
1416 #elif   HAVE_VALLOC
1417   errno = 0;
1418   aligned_memory = valloc (memsize);
1419   err = errno;
1420 #else
1421   /* simplistic non-freeing page allocator */
1422   mem_assert (alignment == sys_page_size);
1423   mem_assert (memsize <= sys_page_size);
1424   if (!compat_valloc_trash)
1425     {
1426       const guint n_pages = 16;
1427       guint8 *mem = malloc (n_pages * sys_page_size);
1428       err = errno;
1429       if (mem)
1430         {
1431           gint i = n_pages;
1432           guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
1433           if (amem != mem)
1434             i--;        /* mem wasn't page aligned */
1435           G_GNUC_BEGIN_IGNORE_DEPRECATIONS
1436           while (--i >= 0)
1437             g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
1438           G_GNUC_END_IGNORE_DEPRECATIONS
1439         }
1440     }
1441   G_GNUC_BEGIN_IGNORE_DEPRECATIONS
1442   aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
1443   G_GNUC_END_IGNORE_DEPRECATIONS
1444 #endif
1445   if (!aligned_memory)
1446     errno = err;
1447   return aligned_memory;
1448 }
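
/* Illustrative sketch (not compiled in): callers obtain aligned blocks through
 * allocator_memalign() and return them through allocator_memfree(), so both
 * sides always use the same allocation flavour selected above (posix_memalign,
 * memalign, valloc or the compat pool).  'page_size' below is a hypothetical,
 * already page-aligned size, not the exact value used by the slab code.
 */
#if 0
  gpointer page = allocator_memalign (page_size, page_size);
  if (!page)
    mem_error ("failed to allocate aligned page: %s", g_strerror (errno));
  /* ... use the block ... */
  allocator_memfree (page_size, page);
#endif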

static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#if     HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  mem_assert (memsize <= sys_page_size);
  G_GNUC_BEGIN_IGNORE_DEPRECATIONS
  g_trash_stack_push (&compat_valloc_trash, mem);
  G_GNUC_END_IGNORE_DEPRECATIONS
#endif
}

static void
mem_error (const char *format,
           ...)
{
  const char *pname;
  va_list args;
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  g_fprintf (stderr, "%s[%ld]: GSlice: ", pname ? pname : "", (long)getpid());
  va_start (args, format);
  g_vfprintf (stderr, format, args);
  va_end (args);
  fputs ("\n", stderr);
  abort();
  _exit (1);
}

/* --- g-slice memory checker tree --- */
typedef size_t SmcKType;                /* key type */
typedef size_t SmcVType;                /* value type */
typedef struct {
  SmcKType key;
  SmcVType value;
} SmcEntry;
static void             smc_tree_insert      (SmcKType  key,
                                              SmcVType  value);
static gboolean         smc_tree_lookup      (SmcKType  key,
                                              SmcVType *value_p);
static gboolean         smc_tree_remove      (SmcKType  key);


/* --- g-slice memory checker implementation --- */
static void
smc_notify_alloc (void   *pointer,
                  size_t  size)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_insert (address, size);
}

#if 0
static void
smc_notify_ignore (void *pointer)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_remove (address);
}
#endif

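/* Validate and unregister a block that is about to be released: returns 1 if
 * the free looks consistent (or pointer is NULL), 0 after printing a
 * MemChecker diagnostic to stderr.  (Descriptive comment added for clarity.)
 */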
static int
smc_notify_free (void   *pointer,
                 size_t  size)
{
  size_t address = (size_t) pointer;
  SmcVType real_size;
  gboolean found_one;

  if (!pointer)
    return 1; /* ignore */
  found_one = smc_tree_lookup (address, &real_size);
  if (!found_one)
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  if (real_size != size && (real_size || size))
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release block with invalid size: %p size=%" G_GSIZE_FORMAT " invalid-size=%" G_GSIZE_FORMAT "\n", pointer, real_size, size);
      return 0;
    }
  if (!smc_tree_remove (address))
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  return 1; /* all fine */
}

/* --- g-slice memory checker tree implementation --- */
#define SMC_TRUNK_COUNT     (4093 /* 16381 */)          /* prime, to distribute trunk collisions (big, allocated just once) */
#define SMC_BRANCH_COUNT    (511)                       /* 2^9 - 1, to distribute branch collisions */
#define SMC_TRUNK_EXTENT    (SMC_BRANCH_COUNT * 2039)   /* key address space per trunk, should distribute uniformly across BRANCH_COUNT */
#define SMC_TRUNK_HASH(k)   ((k / SMC_TRUNK_EXTENT) % SMC_TRUNK_COUNT)  /* generate new trunk hash per megabyte (roughly) */
#define SMC_BRANCH_HASH(k)  (k % SMC_BRANCH_COUNT)
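
/* Worked example (illustrative values): SMC_TRUNK_EXTENT = 511 * 2039
 * = 1041929 bytes, roughly 1 MiB.  A block at address 0x500000 (5242880)
 * therefore hashes to trunk (5242880 / 1041929) % 4093 = 5 and branch
 * 5242880 % 511 = 20, i.e. its entry lives in smc_tree_root[5][20].
 */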
1551 
1552 typedef struct {
1553   SmcEntry    *entries;
1554   unsigned int n_entries;
1555 } SmcBranch;
1556 
1557 static SmcBranch     **smc_tree_root = NULL;
1558 
1559 static void
smc_tree_abort(int errval)1560 smc_tree_abort (int errval)
1561 {
1562   const char *syserr = strerror (errval);
1563   mem_error ("MemChecker: failure in debugging tree: %s", syserr);
1564 }
1565 
1566 static inline SmcEntry*
smc_tree_branch_grow_L(SmcBranch * branch,unsigned int index)1567 smc_tree_branch_grow_L (SmcBranch   *branch,
1568                         unsigned int index)
1569 {
1570   unsigned int old_size = branch->n_entries * sizeof (branch->entries[0]);
1571   unsigned int new_size = old_size + sizeof (branch->entries[0]);
1572   SmcEntry *entry;
1573   mem_assert (index <= branch->n_entries);
1574   branch->entries = (SmcEntry*) realloc (branch->entries, new_size);
1575   if (!branch->entries)
1576     smc_tree_abort (errno);
1577   entry = branch->entries + index;
1578   memmove (entry + 1, entry, (branch->n_entries - index) * sizeof (entry[0]));
1579   branch->n_entries += 1;
1580   return entry;
1581 }
1582 
1583 static inline SmcEntry*
smc_tree_branch_lookup_nearest_L(SmcBranch * branch,SmcKType key)1584 smc_tree_branch_lookup_nearest_L (SmcBranch *branch,
1585                                   SmcKType   key)
1586 {
1587   unsigned int n_nodes = branch->n_entries, offs = 0;
1588   SmcEntry *check = branch->entries;
1589   int cmp = 0;
1590   while (offs < n_nodes)
1591     {
1592       unsigned int i = (offs + n_nodes) >> 1;
1593       check = branch->entries + i;
1594       cmp = key < check->key ? -1 : key != check->key;
1595       if (cmp == 0)
1596         return check;                   /* return exact match */
1597       else if (cmp < 0)
1598         n_nodes = i;
1599       else /* (cmp > 0) */
1600         offs = i + 1;
1601     }
1602   /* check points at last mismatch, cmp > 0 indicates greater key */
1603   return cmp > 0 ? check + 1 : check;   /* return insertion position for inexact match */
1604 }
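
/* Example of the return convention (illustrative keys): for a branch holding
 * entries with keys {10, 20, 30}, a lookup of 20 returns &entries[1] (exact
 * match), a lookup of 25 returns &entries[2], and a lookup of 35 returns
 * &entries[3], one past the end.  The inexact results are exactly the
 * insertion positions that smc_tree_insert() below hands to
 * smc_tree_branch_grow_L().
 */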

static void
smc_tree_insert (SmcKType key,
                 SmcVType value)
{
  unsigned int ix0, ix1;
  SmcEntry *entry;

  g_mutex_lock (&smc_tree_mutex);
  ix0 = SMC_TRUNK_HASH (key);
  ix1 = SMC_BRANCH_HASH (key);
  if (!smc_tree_root)
    {
      smc_tree_root = calloc (SMC_TRUNK_COUNT, sizeof (smc_tree_root[0]));
      if (!smc_tree_root)
        smc_tree_abort (errno);
    }
  if (!smc_tree_root[ix0])
    {
      smc_tree_root[ix0] = calloc (SMC_BRANCH_COUNT, sizeof (smc_tree_root[0][0]));
      if (!smc_tree_root[ix0])
        smc_tree_abort (errno);
    }
  entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
  if (!entry ||                                                                         /* need create */
      entry >= smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries ||   /* need append */
      entry->key != key)                                                                /* need insert */
    entry = smc_tree_branch_grow_L (&smc_tree_root[ix0][ix1], entry - smc_tree_root[ix0][ix1].entries);
  entry->key = key;
  entry->value = value;
  g_mutex_unlock (&smc_tree_mutex);
}

static gboolean
smc_tree_lookup (SmcKType  key,
                 SmcVType *value_p)
{
  SmcEntry *entry = NULL;
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  *value_p = 0;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          found_one = TRUE;
          *value_p = entry->value;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}

static gboolean
smc_tree_remove (SmcKType key)
{
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      SmcEntry *entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          unsigned int i = entry - smc_tree_root[ix0][ix1].entries;
          smc_tree_root[ix0][ix1].n_entries -= 1;
          memmove (entry, entry + 1, (smc_tree_root[ix0][ix1].n_entries - i) * sizeof (entry[0]));
          if (!smc_tree_root[ix0][ix1].n_entries)
            {
              /* avoid useless pressure on the memory system */
              free (smc_tree_root[ix0][ix1].entries);
              smc_tree_root[ix0][ix1].entries = NULL;
            }
          found_one = TRUE;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}

#ifdef G_ENABLE_DEBUG
void
g_slice_debug_tree_statistics (void)
{
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root)
    {
      unsigned int i, j, t = 0, o = 0, b = 0, su = 0, ex = 0, en = 4294967295u;
      double tf, bf;
      for (i = 0; i < SMC_TRUNK_COUNT; i++)
        if (smc_tree_root[i])
          {
            t++;
            for (j = 0; j < SMC_BRANCH_COUNT; j++)
              if (smc_tree_root[i][j].n_entries)
                {
                  b++;
                  su += smc_tree_root[i][j].n_entries;
                  en = MIN (en, smc_tree_root[i][j].n_entries);
                  ex = MAX (ex, smc_tree_root[i][j].n_entries);
                }
              else if (smc_tree_root[i][j].entries)
                o++; /* formerly used, now empty */
          }
      en = b ? en : 0;
      tf = MAX (t, 1.0); /* max(1) to be a valid divisor */
      bf = MAX (b, 1.0); /* max(1) to be a valid divisor */
      g_fprintf (stderr, "GSlice: MemChecker: %u trunks, %u branches, %u old branches\n", t, b, o);
      g_fprintf (stderr, "GSlice: MemChecker: %f branches per trunk, %.2f%% utilization\n",
               b / tf,
               100.0 - (SMC_BRANCH_COUNT - b / tf) / (0.01 * SMC_BRANCH_COUNT));
      g_fprintf (stderr, "GSlice: MemChecker: %f entries per branch, %u minimum, %u maximum\n",
               su / bf, en, ex);
    }
  else
    g_fprintf (stderr, "GSlice: MemChecker: root=NULL\n");
  g_mutex_unlock (&smc_tree_mutex);

  /* sample statistics (beast + GSlice + 24h scripted core & GUI activity):
   *  PID %CPU %MEM   VSZ  RSS      COMMAND
   * 8887 30.3 45.8 456068 414856   beast-0.7.1 empty.bse
   * $ cat /proc/8887/statm # total-program-size resident-set-size shared-pages text/code data/stack library dirty-pages
   * 114017 103714 2354 344 0 108676 0
   * $ cat /proc/8887/status
   * Name:   beast-0.7.1
   * VmSize:   456068 kB
   * VmLck:         0 kB
   * VmRSS:    414856 kB
   * VmData:   434620 kB
   * VmStk:        84 kB
   * VmExe:      1376 kB
   * VmLib:     13036 kB
   * VmPTE:       456 kB
   * Threads:        3
   * (gdb) print g_slice_debug_tree_statistics ()
   * GSlice: MemChecker: 422 trunks, 213068 branches, 0 old branches
   * GSlice: MemChecker: 504.900474 branches per trunk, 98.81% utilization
   * GSlice: MemChecker: 4.965039 entries per branch, 1 minimum, 37 maximum
   */
}
#endif /* G_ENABLE_DEBUG */
