#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H

#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/bin_stats.h"

/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t			reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t			slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t		nregs;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t		bitmap_info;
};
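
/*
 * Illustrative example (the numbers are hypothetical, not taken from the
 * size class tables): a bin for a 64-byte size class backed by 16 KiB
 * slabs would have
 *
 *   reg_size  = 64
 *   slab_size = 16384
 *   nregs     = slab_size / reg_size = 16384 / 64 = 256
 *
 * and bitmap_info would describe a 256-bit bitmap, one bit per region.
 */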

extern const bin_info_t bin_infos[NBINS];

typedef struct bin_s bin_t;
struct bin_s {
	/* All operations on bin_t fields require lock ownership. */
	malloc_mutex_t		lock;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	extent_t		*slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to ensure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	extent_heap_t		slabs_nonfull;

	/* List used to track full slabs. */
	extent_list_t		slabs_full;

	/* Bin statistics. */
	bin_stats_t		stats;
};
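
/*
 * Sketch of how the fields above cooperate on the allocation path
 * (illustrative pseudocode; slab_full() and slab_alloc_region() are
 * hypothetical helpers, and the real transitions live in arena.c):
 *
 *	// Fast path: carve the next free region out of slabcur.
 *	if (bin->slabcur != NULL && !slab_full(bin->slabcur)) {
 *		return slab_alloc_region(bin->slabcur);
 *	}
 *	// slabcur is exhausted: park it on slabs_full, then promote the
 *	// oldest/lowest non-full slab (or a freshly allocated slab).
 *	extent_list_append(&bin->slabs_full, bin->slabcur);
 *	bin->slabcur = extent_heap_remove_first(&bin->slabs_nonfull);
 */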

/* Initializes a bin to empty.  Returns true on error. */
bool bin_init(bin_t *bin);

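/*
 * Example of the call pattern (a sketch; in practice the bins embedded in
 * an arena are initialized during arena creation):
 *
 *	bin_t bin;
 *	if (bin_init(&bin)) {
 *		// Initialization failed (e.g., mutex init failure).
 *	}
 */
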
/* Forking. */
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);

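/*
 * These hooks follow the usual prefork/postfork protocol around bin->lock
 * (a sketch; in practice they are driven by the arena-level prefork
 * machinery rather than called directly):
 *
 *	bin_prefork(tsdn, bin);			// before fork(): acquire lock
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		bin_postfork_child(tsdn, bin);	// child: reinitialize lock
 *	} else {
 *		bin_postfork_parent(tsdn, bin);	// parent: release lock
 *	}
 */
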
/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
	malloc_mutex_lock(tsdn, &bin->lock);
	malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
	dst_bin_stats->nmalloc += bin->stats.nmalloc;
	dst_bin_stats->ndalloc += bin->stats.ndalloc;
	dst_bin_stats->nrequests += bin->stats.nrequests;
	dst_bin_stats->curregs += bin->stats.curregs;
	dst_bin_stats->nfills += bin->stats.nfills;
	dst_bin_stats->nflushes += bin->stats.nflushes;
	dst_bin_stats->nslabs += bin->stats.nslabs;
	dst_bin_stats->reslabs += bin->stats.reslabs;
	dst_bin_stats->curslabs += bin->stats.curslabs;
	malloc_mutex_unlock(tsdn, &bin->lock);
}
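
/*
 * Example use (a sketch; assumes an arena whose bins live in
 * arena->bins[NBINS] and a destination array dst[NBINS], as in the
 * per-arena stats merging done in arena.c):
 *
 *	for (unsigned i = 0; i < NBINS; i++) {
 *		bin_stats_merge(tsdn, &dst[i], &arena->bins[i]);
 *	}
 */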

#endif /* JEMALLOC_INTERNAL_BIN_H */