1 /*
2 * Copyright (C) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <stdarg.h>
#include <string.h>
#include "pthread_impl.h"
#include "malloc_impl.h"
22
23 #ifdef MUSL_ITERATE_AND_STATS_API
/* Upper bound on one formatted statistics line (excluding the NUL). */
#define STAT_PRINTF_MAX_LEN 255
/* Version number reported in the <malloc version="..."> XML element. */
#define ALLOCATOR_VERSION 1
/* Number of "-----------" segments in the table-mode separator line. */
#define SEPARATOR_REPEATS 7

/* Sink for formatted statistics output: first argument is an opaque
 * caller-supplied context, second is the text to emit. */
typedef void (write_cb_fun)(void *, const char *);

/* Output format selector: human-readable table or malloc_info-style XML. */
typedef enum {
TABLE, XML
} print_mode;

/* Aggregated allocation counters for one thread or the whole process. */
typedef struct {
size_t mmapped_regions;            /* number of mmap-backed chunks */
size_t total_mmapped_memory;       /* bytes in mmap-backed chunks */
size_t total_allocated_memory;     /* bytes in all live chunks */
size_t total_allocated_heap_space; /* bytes in heap (non-mmap) chunks */
} malloc_stats_t;
40
/*
 * Format a message and hand it to write_cb (with write_cb_arg as context),
 * or emit it to stdout when no callback is given.  Output longer than
 * STAT_PRINTF_MAX_LEN bytes is truncated.
 */
static void stat_printf(write_cb_fun *write_cb, void *write_cb_arg, const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    char buf[STAT_PRINTF_MAX_LEN + 1];
    /* vsnprintf returns a negative value only on error; a zero return is a
     * legal empty message, so it must not be treated as a failure. */
    if (vsnprintf(buf, sizeof buf, fmt, args) >= 0) {
        if (write_cb != NULL) {
            write_cb(write_cb_arg, buf);
        } else {
            /* buf can contain '%' from formatted values: never reuse it as
             * a format string (CERT FIO30-C); write it out verbatim. */
            fputs(buf, stdout);
        }
    } else {
        fprintf(stderr, "Error writing to buffer");
    }
    va_end(args);
}
57
/* Emit one table row with a single thread's allocation statistics. */
static void print_thread_stats_table(write_cb_fun *write_cb, void *write_cb_arg, int tid, malloc_stats_t *stats)
{
    stat_printf(write_cb, write_cb_arg, "%-11d %-23zu %-20zu %-20zu\n",
                tid,
                stats->total_allocated_memory,
                stats->total_mmapped_memory,
                stats->mmapped_regions);
}
75
/* Emit a single "<name>value</name>" XML element followed by a newline. */
static void print_amount_xml(write_cb_fun *write_cb, void *write_cb_arg, const char *name, size_t value)
{
    const char *tag = name;
    stat_printf(write_cb, write_cb_arg, "<%s>%zu</%s>\n", tag, value, tag);
}
80
/* Emit the three per-thread counters as XML elements, one per line. */
static void print_thread_specific_amounts_xml(write_cb_fun *write_cb, void *write_cb_arg, malloc_stats_t *stats)
{
    /* Table of (tag, value) pairs, printed in declaration order. */
    const struct {
        const char *tag;
        size_t value;
    } fields[] = {
        { "total_allocated_memory", stats->total_allocated_memory },
        { "total_mmapped_memory", stats->total_mmapped_memory },
        { "mmapped_regions", stats->mmapped_regions },
    };
    for (size_t i = 0; i < sizeof fields / sizeof fields[0]; i++) {
        print_amount_xml(write_cb, write_cb_arg, fields[i].tag, fields[i].value);
    }
}
87
/* Emit one <thread id="..."> element wrapping the per-thread counters. */
static void print_thread_stats_xml(write_cb_fun *write_cb, void *write_cb_arg, int tid, malloc_stats_t *stats)
{
    stat_printf(write_cb, write_cb_arg, "<thread id=\"%d\">\n", tid);
    print_thread_specific_amounts_xml(write_cb, write_cb_arg, stats);
    stat_printf(write_cb, write_cb_arg, "</thread>\n");
}
99
add_up_chunks(occupied_bin_t * occupied_bin)100 static malloc_stats_t add_up_chunks(occupied_bin_t *occupied_bin)
101 {
102 malloc_stats_t stats = {0, 0, 0, 0};
103 for (struct chunk *c = occupied_bin->head; c != NULL; c = c->next_occupied) {
104 size_t chunk_memory = CHUNK_SIZE(c) - OVERHEAD;
105 stats.total_allocated_memory += chunk_memory;
106 if (IS_MMAPPED(c)) {
107 stats.mmapped_regions++;
108 stats.total_mmapped_memory += chunk_memory;
109 } else {
110 stats.total_allocated_heap_space += chunk_memory;
111 }
112 }
113 return stats;
114 }
115
add_up_chunks_by_threads(occupied_bin_t * occupied_bin,int tid)116 static malloc_stats_t add_up_chunks_by_threads(occupied_bin_t *occupied_bin, int tid)
117 {
118 malloc_stats_t stats = {0, 0, 0, 0};
119 for (struct chunk *c = occupied_bin->head; c != NULL; c = c->next_occupied) {
120 if (c->thread_id == tid) {
121 size_t chunk_memory = CHUNK_SIZE(c) - OVERHEAD;
122 stats.total_allocated_memory += chunk_memory;
123 if (IS_MMAPPED(c)) {
124 stats.mmapped_regions++;
125 stats.total_mmapped_memory += chunk_memory;
126 } else {
127 stats.total_allocated_heap_space += chunk_memory;
128 }
129 }
130 }
131 return stats;
132 }
133
print_threads(write_cb_fun * write_cb,void * write_cb_arg,print_mode mode)134 static size_t print_threads(write_cb_fun *write_cb, void *write_cb_arg, print_mode mode)
135 {
136 size_t total_allocated_heap_space = 0;
137
138 for (size_t i = 0; i < OCCUPIED_BIN_COUNT; ++i) {
139 occupied_bin_t *occupied_bin = __get_occupied_bin_by_idx(i);
140 int min_id = 0;
141 int found;
142 do {
143 found = 0;
144 for (struct chunk *c = occupied_bin->head; c != NULL; c = c->next_occupied) {
145 if (c->thread_id > min_id) {
146 min_id = c->thread_id;
147 found = 1;
148 }
149 }
150 if (found) {
151 malloc_stats_t stats = add_up_chunks_by_threads(occupied_bin, min_id);
152 total_allocated_heap_space += stats.total_allocated_heap_space;
153
154 if (mode == TABLE) {
155 print_thread_stats_table(write_cb, write_cb_arg, min_id, &stats);
156 } else {
157 print_thread_stats_xml(write_cb, write_cb_arg, min_id, &stats);
158 }
159 }
160 } while (found);
161 }
162
163 return total_allocated_heap_space;
164 }
165
/*
 * Report remaining free heap space (total heap minus allocated heap).
 * Table mode prints a dashed separator line first; XML mode emits a
 * single <total_free_heap_space> element.
 */
static void print_total_free_heap_space(
    write_cb_fun *write_cb,
    void *write_cb_arg,
    size_t total_allocated_heap_space,
    print_mode mode
)
{
    if (mode == TABLE) {
        stat_printf(write_cb, write_cb_arg, "\n");
        size_t remaining = SEPARATOR_REPEATS;
        while (remaining-- > 0) {
            stat_printf(write_cb, write_cb_arg, "-----------");
        }
        stat_printf(write_cb, write_cb_arg, "\ntotal free heap space: %zu\n",
                    __get_total_heap_space() - total_allocated_heap_space);
    } else {
        print_amount_xml(write_cb, write_cb_arg, "total_free_heap_space",
                         __get_total_heap_space() - total_allocated_heap_space);
    }
}
197
/* write_cb adapter: append string s to the stdio stream passed as fp. */
static void print_to_file(void *fp, const char *s)
{
    FILE *stream = (FILE *)fp;
    if (fputs(s, stream) == EOF) {
        fprintf(stderr, "Error writing to file stream: %s", strerror(errno));
    }
}
204
/* Accumulate every counter of *source into *destination (element-wise add). */
static void add_stats(malloc_stats_t *destination, const malloc_stats_t *source)
{
    destination->mmapped_regions += source->mmapped_regions;
    destination->total_mmapped_memory += source->total_mmapped_memory;
    destination->total_allocated_memory += source->total_allocated_memory;
    destination->total_allocated_heap_space += source->total_allocated_heap_space;
}
212 #endif
213
/*
 * malloc_info(3): dump allocator state as XML to fp.  Only options == 0 is
 * defined; any other value fails with EINVAL.  Returns 0 on success,
 * -1 on invalid options.  The allocator is locked for the whole dump.
 */
int malloc_info(int options, FILE* fp)
{
#ifdef MUSL_ITERATE_AND_STATS_API
    if (options != 0) {
        errno = EINVAL;
        return -1;
    }
    malloc_disable();
    stat_printf(print_to_file, fp, "<?xml version=\"1.0\"?>\n");
    stat_printf(print_to_file, fp, "<malloc version=\"%d\">\n", ALLOCATOR_VERSION);
    stat_printf(print_to_file, fp, "<threads>\n");
    size_t heap_in_use = print_threads(print_to_file, fp, XML);
    stat_printf(print_to_file, fp, "</threads>\n");
    print_total_free_heap_space(print_to_file, fp, heap_in_use, XML);
    stat_printf(print_to_file, fp, "</malloc>\n");
    malloc_enable();
#endif
    return 0;
}
233
234 #ifdef USE_JEMALLOC_DFX_INTF
235 extern struct mallinfo je_mallinfo();
236 extern void je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
237 const char *opts);
238 #endif
239
/*
 * Print per-thread allocation statistics as a text table through write_cb
 * (jemalloc builds forward to je_malloc_stats_print instead).  The
 * allocator is locked while the table is produced.
 */
void malloc_stats_print(void (*write_cb) (void *, const char *), void *cbopaque, const char *opts)
{
#ifdef USE_JEMALLOC_DFX_INTF
    je_malloc_stats_print(write_cb, cbopaque, opts);
#elif defined(MUSL_ITERATE_AND_STATS_API)
    malloc_disable();
    /* Header row matching the column widths used by the per-thread rows. */
    stat_printf(write_cb, cbopaque, "%-11s %-23s %-20s %-20s\n",
                "thread_id", "total_allocated_memory", "total_mmapped_memory", "mmapped_regions");
    size_t heap_in_use = print_threads(write_cb, cbopaque, TABLE);
    print_total_free_heap_space(write_cb, cbopaque, heap_in_use, TABLE);
    malloc_enable();
#endif
}
260
/*
 * mallinfo2(3): return process-wide allocation counters.
 *   hblks    - number of mmap-backed chunks
 *   hblkhd   - bytes held in mmap-backed chunks
 *   uordblks - bytes in all live chunks
 *   fordblks - free heap bytes (total heap minus allocated heap)
 * jemalloc builds convert from je_mallinfo(); builds with neither API
 * return a zeroed structure.
 */
struct mallinfo2 mallinfo2(void)
{
#ifdef USE_JEMALLOC_DFX_INTF
    struct mallinfo info = je_mallinfo();
    struct mallinfo2 res = {0};
    res.hblks = info.hblks;
    res.hblkhd = info.hblkhd;
    res.usmblks = info.usmblks;
    res.uordblks = info.uordblks;
    res.fordblks = info.fordblks;
    return res;
#elif defined(MUSL_ITERATE_AND_STATS_API)
    malloc_disable();
    /* Sum the counters of every occupied bin while the allocator is locked. */
    malloc_stats_t totals = {0};
    for (size_t i = 0; i < OCCUPIED_BIN_COUNT; ++i) {
        malloc_stats_t bin_stats = add_up_chunks(__get_occupied_bin_by_idx(i));
        add_stats(&totals, &bin_stats);
    }

    struct mallinfo2 res = {0};
    res.hblks = totals.mmapped_regions;
    res.hblkhd = totals.total_mmapped_memory;
    res.uordblks = totals.total_allocated_memory;
    res.fordblks = __get_total_heap_space() - totals.total_allocated_heap_space;
    malloc_enable();
    return res;
#endif
    return (struct mallinfo2){};
}
292
/*
 * mallinfo(3): legacy variant of mallinfo2 with narrower (int) fields.
 * Copies only the fields this implementation populates; all others are
 * left zero.
 */
struct mallinfo mallinfo(void)
{
    struct mallinfo2 full = mallinfo2();
    struct mallinfo legacy = {0};
    legacy.hblks = full.hblks;
    legacy.hblkhd = full.hblkhd;
    legacy.uordblks = full.uordblks;
    legacy.fordblks = full.fordblks;
    return legacy;
}
303