#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */
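
/* Mutex protecting base allocator metadata and statistics. */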
static malloc_mutex_t base_mtx;
static size_t base_extent_sn_next;
static extent_tree_t base_avail_szsnad;
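/* LIFO cache of unused extent nodes, linked through each node's first word. */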
static extent_node_t *base_nodes;
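/* Statistics: bytes allocated, resident, and mapped by the base allocator. */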
static size_t base_allocated;
static size_t base_resident;
static size_t base_mapped;

/******************************************************************************/
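
/*
 * Pop a cached extent node off the base_nodes free list, or return NULL if
 * the list is empty.
 */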
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}
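
/*
 * Return an extent node to the base_nodes free list for later reuse; the
 * node's first word is overwritten with the list link.
 */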
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}
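
/*
 * Initialize a node that tracks a range of base memory, stamping it with the
 * next extent serial number.
 */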
static void
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
{
	size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;

	extent_node_init(node, NULL, addr, size, sn, true, true);
}
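
/*
 * Map a new chunk of at least minsize bytes and wrap it in an extent node,
 * carving the node itself out of the chunk when none is cached.
 */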
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
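	/*
	 * No cached node was available, so carve the tracking node out of the
	 * front of the newly mapped chunk.
	 */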
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	base_extent_node_init(node, addr, csize);
	return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
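
/*
 * Typical usage (sketch only): internal metadata allocators obtain permanent,
 * zeroed memory from this function, e.g. via a hypothetical helper such as:
 *
 *	static rtree_node_elm_t *
 *	metadata_nodes_alloc(tsdn_t *tsdn, size_t nelms)
 *	{
 *		return ((rtree_node_elm_t *)base_alloc(tsdn,
 *		    nelms * sizeof(rtree_node_elm_t)));
 *	}
 *
 * Memory obtained this way is never returned to the operating system for the
 * life of the process.
 */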
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szsnad_remove(&base_avail_szsnad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}
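
	/*
	 * Carve the request from the front of the node; reinsert any
	 * remainder into the availability tree, or recycle the node if it was
	 * consumed exactly.
	 */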
	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szsnad_insert(&base_avail_szsnad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}
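
/* Copy out the current base allocation statistics while holding base_mtx. */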
void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
    size_t *mapped)
{

	malloc_mutex_lock(tsdn, &base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(tsdn, &base_mtx);
}
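
/* Initialize base allocator state.  Returns true on error. */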
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
		return (true);
	base_extent_sn_next = 0;
	extent_tree_szsnad_new(&base_avail_szsnad);
	base_nodes = NULL;

	return (false);
}
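
/*
 * Fork hooks: base_mtx is acquired before fork() and released in the parent
 * and child afterward, so that forked children inherit consistent base
 * allocator state.
 */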
void
base_prefork(tsdn_t *tsdn)
{

	malloc_mutex_prefork(tsdn, &base_mtx);
}

void
base_postfork_parent(tsdn_t *tsdn)
{

	malloc_mutex_postfork_parent(tsdn, &base_mtx);
}

void
base_postfork_child(tsdn_t *tsdn)
{

	malloc_mutex_postfork_child(tsdn, &base_mtx);
}