#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <string.h>
#include <sys/mman.h>
#ifndef __LITEOS__
#include <sys/prctl.h>
#endif
#include <errno.h>

#include "meta.h"

#ifdef USE_JEMALLOC
#ifdef USE_JEMALLOC_DFX_INTF
extern void je_malloc_disable();
extern void je_malloc_enable();
extern int je_iterate(uintptr_t base, size_t size,
	void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);
extern int je_mallopt(int param, int value);
#endif
#endif

#ifdef MALLOC_SECURE_ALL
#include <fcntl.h>
#include <unistd.h> // for read() and close()
#define RANDOM_BUFFER_LEN 512
static uint8_t buffer[RANDOM_BUFFER_LEN] = { 0 };
static size_t ri = RANDOM_BUFFER_LEN;

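// return one byte of randomness from a 512-byte pool refilled from
// /dev/urandom, falling back to get_random_secret() when the device
// cannot be read. used to randomize slot selection below.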
static uint8_t get_random8()
{
	uint8_t num;
	if ((ri >= RANDOM_BUFFER_LEN) || (buffer[0] == 0)) {
		int fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			num = (uint8_t)get_random_secret();
			return num;
		}

		ssize_t got = read(fd, buffer, RANDOM_BUFFER_LEN);
		close(fd);
		if (got != RANDOM_BUFFER_LEN) {
			// short or failed read: don't trust the pool
			num = (uint8_t)get_random_secret();
			return num;
		}
		ri = 0;
	}
	num = buffer[ri];
	ri++;
	return num;
}

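// pick a pseudo-random set bit of avail_mask rather than the lowest
// one: draw a cut point r in [0,last_idx) (this assumes last_idx>=1),
// take the highest available bit above index last_idx-r, and fall
// back to the lowest available bit when that range is empty. this
// makes slot placement within a group unpredictable.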
static int get_randomIdx(int avail_mask, int last_idx)
{
	uint32_t mask;
	uint32_t r;
	uint32_t cmask;
	int idx;

	mask = avail_mask;
	r = get_random8() % last_idx;
	cmask = ~((2u << (last_idx - r)) - 1);

	if (mask & cmask) {
		idx = 31 - a_clz_32(mask & cmask);
	} else {
		idx = a_ctz_32(mask);
	}

	return idx;
}
#endif

LOCK_OBJ_DEF;

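// allocation size classes, in units of UNIT (defined in meta.h; 16
// bytes in upstream mallocng). classes are chosen so that slot count
// times size lands just below a power of two.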
const uint16_t size_classes[] = {
	1, 2, 3, 4, 5, 6, 7, 8,
	9, 10, 12, 15,
	18, 20, 25, 31,
	36, 42, 50, 63,
	72, 84, 102, 127,
	146, 170, 204, 255,
	292, 340, 409, 511,
	584, 682, 818, 1023,
	1169, 1364, 1637, 2047,
	2340, 2730, 3276, 4095,
	4680, 5460, 6552, 8191,
};

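// slot counts per group: small_cnt_tab gives three progressively
// smaller counts for classes below 9, used while usage of the class
// is still low; med_cnt_tab[sc&3] gives the max count fitting a
// power-of-two-sized group for medium classes.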
static const uint8_t small_cnt_tab[][3] = {
	{ 30, 30, 30 },
	{ 31, 15, 15 },
	{ 20, 10, 10 },
	{ 31, 15, 7 },
	{ 25, 12, 6 },
	{ 21, 10, 5 },
	{ 18, 8, 4 },
	{ 31, 15, 7 },
	{ 28, 14, 6 },
};

static const uint8_t med_cnt_tab[4] = { 28, 24, 20, 32 };

struct malloc_context ctx = { 0 };

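// allocate a metadata slot. meta objects live in dedicated 4k areas,
// obtained first via brk (with a PROT_NONE guard page below the first
// area) and, once brk fails, via exponentially growing PROT_NONE
// mmaps unprotected one 4k chunk at a time. each area begins with a
// struct meta_area header carrying the secret check value.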
struct meta *alloc_meta(void)
{
	struct meta *m;
	unsigned char *p;
	if (!ctx.init_done) {
#ifndef PAGESIZE
		ctx.pagesize = get_page_size();
#endif
		ctx.secret = get_random_secret();
		ctx.init_done = 1;
	}
	size_t pagesize = PGSZ;
	if (pagesize < 4096) pagesize = 4096;
	if ((m = dequeue_head(&ctx.free_meta_head))) return m;
	if (!ctx.avail_meta_count) {
		int need_unprotect = 1;
		if (!ctx.avail_meta_area_count && ctx.brk!=-1) {
			uintptr_t new = ctx.brk + pagesize;
			int need_guard = 0;
			if (!ctx.brk) {
				need_guard = 1;
				ctx.brk = brk(0);
				// some ancient kernels returned _ebss
				// instead of next page as initial brk.
				ctx.brk += -ctx.brk & (pagesize-1);
				new = ctx.brk + 2*pagesize;
			}
			if (brk(new) != new) {
				ctx.brk = -1;
			} else {
#ifndef __LITEOS__
				prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ctx.brk, new - ctx.brk, "native_heap:meta");
#endif
				if (need_guard) mmap((void *)ctx.brk, pagesize,
					PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
				ctx.brk = new;
				ctx.avail_meta_areas = (void *)(new - pagesize);
				ctx.avail_meta_area_count = pagesize>>12;
				need_unprotect = 0;
			}
		}
		if (!ctx.avail_meta_area_count) {
			size_t n = 2UL << ctx.meta_alloc_shift;
			p = mmap(0, n*pagesize, PROT_NONE,
				MAP_PRIVATE|MAP_ANON, -1, 0);
			if (p==MAP_FAILED) return 0;
			ctx.avail_meta_areas = p + pagesize;
			ctx.avail_meta_area_count = (n-1)*(pagesize>>12);
			ctx.meta_alloc_shift++;
		}
		p = ctx.avail_meta_areas;
		if ((uintptr_t)p & (pagesize-1)) need_unprotect = 0;
		if (need_unprotect)
			if (mprotect(p, pagesize, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS)
				return 0;
		ctx.avail_meta_area_count--;
		ctx.avail_meta_areas = p + 4096;
		if (ctx.meta_area_tail) {
			ctx.meta_area_tail->next = (void *)p;
		} else {
			ctx.meta_area_head = (void *)p;
		}
		ctx.meta_area_tail = (void *)p;
		ctx.meta_area_tail->check = ctx.secret;
		ctx.avail_meta_count = ctx.meta_area_tail->nslots
			= (4096-sizeof(struct meta_area))/sizeof *m;
		ctx.avail_meta = ctx.meta_area_tail->slots;
	}
	ctx.avail_meta_count--;
	m = ctx.avail_meta++;
	m->prev = m->next = 0;
	return m;
}

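// take one slot from the active group of a class and return its bit.
// avail_mask tracks slots ready to hand out, freed_mask slots given
// back by free. when avail_mask is empty, the group is dequeued (if
// nothing was freed either) or cycled past, and freed slots are
// activated; fully-free and not-yet-touched groups are avoided so
// unused pages stay clean.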
static uint32_t try_avail(struct meta **pm)
{
	struct meta *m = *pm;
	uint32_t first;
	if (!m) return 0;
	uint32_t mask = m->avail_mask;
	if (!mask) {
		if (!m) return 0;
		if (!m->freed_mask) {
			dequeue(pm, m);
			m = *pm;
			if (!m) return 0;
		} else {
			m = m->next;
			*pm = m;
		}

		mask = m->freed_mask;

		// skip fully-free group unless it's the only one
		// or it's a permanently non-freeable group
		if (mask == (2u<<m->last_idx)-1 && m->freeable) {
			m = m->next;
			*pm = m;
			mask = m->freed_mask;
		}

		// activate more slots in a not-fully-active group
		// if needed, but only as a last resort. prefer using
		// any other group with free slots. this avoids
		// touching & dirtying as-yet-unused pages.
		if (!(mask & ((2u<<m->mem->active_idx)-1))) {
			if (m->next != m) {
				m = m->next;
				*pm = m;
			} else {
				int cnt = m->mem->active_idx + 2;
				int size = size_classes[m->sizeclass]*UNIT;
				int span = UNIT + size*cnt;
				// activate up to next 4k boundary
				while ((span^(span+size-1)) < 4096) {
					cnt++;
					span += size;
				}
				if (cnt > m->last_idx+1)
					cnt = m->last_idx+1;
				m->mem->active_idx = cnt-1;
			}
		}
		mask = activate_group(m);
		assert(mask);
		decay_bounces(m->sizeclass);
	}
#ifdef MALLOC_SECURE_ALL
	int idx = get_randomIdx(mask, m->last_idx);
	first = 1 << idx;
#else
	first = mask&-mask;
#endif
	m->avail_mask = mask-first;
	return first;
}

static int alloc_slot(int, size_t);

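// create a new group for size class sc. large groups are individually
// mmapped (and named via prctl so memory tools can attribute them);
// small ones nest inside a slot of a larger class. slot counts are
// trimmed while usage of the class is low to bound eager allocation.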
static struct meta *alloc_group(int sc, size_t req)
{
	size_t size = UNIT*size_classes[sc];
	int i = 0, cnt;
	unsigned char *p;
	struct meta *m = alloc_meta();
	if (!m) return 0;
	size_t usage = ctx.usage_by_class[sc];
	size_t pagesize = PGSZ;
	int active_idx;
	if (sc < 9) {
		while (i<2 && 4*small_cnt_tab[sc][i] > usage)
			i++;
		cnt = small_cnt_tab[sc][i];
	} else {
		// lookup max number of slots fitting in power-of-two size
		// from a table, along with number of factors of two we
		// can divide out without a remainder or reaching 1.
		cnt = med_cnt_tab[sc&3];

		// reduce cnt to avoid excessive eager allocation.
		while (!(cnt&1) && 4*cnt > usage)
			cnt >>= 1;

		// data structures don't support groups whose slot offsets
		// in units don't fit in 16 bits.
		while (size*cnt >= 65536*UNIT)
			cnt >>= 1;
	}

	// If we selected a count of 1 above but it's not sufficient to use
	// mmap, increase to 2. Then it might be; if not it will nest.
	if (cnt==1 && size*cnt+UNIT <= pagesize/2) cnt = 2;

	// All choices of size*cnt are "just below" a power of two, so anything
	// larger than half the page size should be allocated as whole pages.
	if (size*cnt+UNIT > pagesize/2) {
		// check/update bounce counter to start/increase retention
		// of freed maps, and inhibit use of low-count, odd-size
		// small mappings and single-slot groups if activated.
		int nosmall = is_bouncing(sc);
		account_bounce(sc);
		step_seq();

		// since the following count reduction opportunities have
		// an absolute memory usage cost, don't overdo them. count
		// coarse usage as part of usage.
		if (!(sc&1) && sc<32) usage += ctx.usage_by_class[sc+1];

		// try to drop to a lower count if the one found above
		// increases usage by more than 25%. these reduced counts
		// roughly fill an integral number of pages, just not a
		// power of two, limiting amount of unusable space.
		if (4*cnt > usage && !nosmall) {
			if (0);
			else if ((sc&3)==1 && size*cnt>8*pagesize) cnt = 2;
			else if ((sc&3)==2 && size*cnt>4*pagesize) cnt = 3;
			else if ((sc&3)==0 && size*cnt>8*pagesize) cnt = 3;
			else if ((sc&3)==0 && size*cnt>2*pagesize) cnt = 5;
		}
		size_t needed = size*cnt + UNIT;
		needed += -needed & (pagesize-1);

		// produce an individually-mmapped allocation if usage is low,
		// bounce counter hasn't triggered, and either it saves memory
		// or it avoids eager slot allocation without wasting too much.
		if (!nosmall && cnt<=7) {
			req += IB + UNIT;
			req += -req & (pagesize-1);
			if (req<size+UNIT || (req>=4*pagesize && 2*cnt>usage)) {
				cnt = 1;
				needed = req;
			}
		}

		p = mmap(0, needed, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
		if (p==MAP_FAILED) {
			free_meta(m);
			return 0;
		}
#ifndef __LITEOS__
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, needed, "native_heap:brk");
#endif
		m->maplen = needed>>12;
		ctx.mmap_counter++;
		active_idx = (4096-UNIT)/size-1;
		if (active_idx > cnt-1) active_idx = cnt-1;
		if (active_idx < 0) active_idx = 0;
	} else {
		int j = size_to_class(UNIT+cnt*size-IB);
		int idx = alloc_slot(j, UNIT+cnt*size-IB);
		if (idx < 0) {
			free_meta(m);
			return 0;
		}
		struct meta *g = ctx.active[j];
		p = enframe(g, idx, UNIT*size_classes[j]-IB, ctx.mmap_counter);
		m->maplen = 0;
		p[-3] = (p[-3]&31) | (6<<5);
		for (int i=0; i<=cnt; i++)
			p[UNIT+i*size-4] = 0;
		active_idx = cnt-1;
	}
	ctx.usage_by_class[sc] += cnt;
	m->avail_mask = (2u<<active_idx)-1;
	m->freed_mask = (2u<<(cnt-1))-1 - m->avail_mask;
	m->mem = (void *)p;
	m->mem->meta = encode_ptr(m, ctx.secret);
	m->mem->active_idx = active_idx;
	m->last_idx = cnt-1;
	m->freeable = 1;
	m->sizeclass = sc;
	return m;
}

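// return the index of a free slot in class sc, allocating a fresh
// group when no active group has one. a fresh group always hands out
// slot 0 first (avail_mask-- clears bit 0).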
static int alloc_slot(int sc, size_t req)
{
	uint32_t first = try_avail(&ctx.active[sc]);
	if (first) return a_ctz_32(first);

	struct meta *g = alloc_group(sc, req);
	if (!g) return -1;

	g->avail_mask--;
	queue(&ctx.active[sc], g);
	return 0;
}

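// allocations of MMAP_THRESHOLD or more get a dedicated mapping with
// a single-slot meta (sizeclass 63). everything else is served from a
// slot of its size class, taken from avail_mask by atomic cas while
// holding the read lock, falling back to alloc_slot after upgrading
// to the exclusive lock.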
void *malloc(size_t n)
{
	if (size_overflows(n)) return 0;
	struct meta *g;
	uint32_t mask, first;
	int sc;
	int idx;
	int ctr;

	if (n >= MMAP_THRESHOLD) {
		size_t needed = n + IB + UNIT;
		void *p = mmap(0, needed, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
		if (p==MAP_FAILED) return 0;
		wrlock();
#ifndef __LITEOS__
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, needed, "native_heap:mmap");
#endif
		step_seq();
		g = alloc_meta();
		if (!g) {
			unlock();
			munmap(p, needed);
			return 0;
		}
		g->mem = p;
		g->mem->meta = encode_ptr(g, ctx.secret);
		g->last_idx = 0;
		g->freeable = 1;
		g->sizeclass = 63;
		g->maplen = (needed+4095)/4096;
		g->avail_mask = g->freed_mask = 0;
		// use a global counter to cycle offset in
		// individually-mmapped allocations.
		ctx.mmap_counter++;
		idx = 0;
		goto success;
	}

	sc = size_to_class(n);

	rdlock();
	g = ctx.active[sc];

	// use coarse size classes initially when there are not yet
	// any groups of desired size. this allows counts of 2 or 3
	// to be allocated at first rather than having to start with
	// 7 or 5, the min counts for even size classes.
	if (!g && sc>=4 && sc<32 && sc!=6 && !(sc&1) && !ctx.usage_by_class[sc]) {
		size_t usage = ctx.usage_by_class[sc|1];
		// if a new group may be allocated, count it toward
		// usage in deciding if we can use coarse class.
		if (!ctx.active[sc|1] || (!ctx.active[sc|1]->avail_mask
		    && !ctx.active[sc|1]->freed_mask))
			usage += 3;
		if (usage <= 12)
			sc |= 1;
		g = ctx.active[sc];
	}

	for (;;) {
		mask = g ? g->avail_mask : 0;
#ifdef MALLOC_SECURE_ALL
		if (!mask) break;
		idx = get_randomIdx(mask, g->last_idx);
		first = 1u << idx;

		if (RDLOCK_IS_EXCLUSIVE || !MT)
			g->avail_mask = mask-first;
		else if (a_cas(&g->avail_mask, mask, mask-first)!=mask)
			continue;
#else
		first = mask&-mask;
		if (!first) break;
		if (RDLOCK_IS_EXCLUSIVE || !MT)
			g->avail_mask = mask-first;
		else if (a_cas(&g->avail_mask, mask, mask-first)!=mask)
			continue;
		idx = a_ctz_32(first);
#endif
		goto success;
	}
	upgradelock();

	idx = alloc_slot(sc, n);
	if (idx < 0) {
		unlock();
		return 0;
	}
	g = ctx.active[sc];

success:
	ctr = ctx.mmap_counter;
	unlock();
	return enframe(g, idx, n, ctr);
}

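// report whether a just-allocated slot is known to be zero-filled
// (e.g. so calloc can skip its memset): individually-mmapped
// allocations and single-slot groups with stride below the nominal
// class size always come from fresh anonymous pages.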
int is_allzero(void *p)
{
	struct meta *g = get_meta(p);
	return g->sizeclass >= 48 ||
		get_stride(g) < UNIT*size_classes[g->sizeclass];
}

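// DFX (debug/diagnostics) entry points: with USE_JEMALLOC_DFX_INTF
// enabled they forward to the jemalloc implementations declared at
// the top of the file, otherwise they are no-ops so callers can link
// unconditionally.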
int mallopt(int param, int value)
{
#ifdef USE_JEMALLOC_DFX_INTF
	return je_mallopt(param, value);
#endif
	return 0;
}

void malloc_disable(void)
{
#ifdef USE_JEMALLOC_DFX_INTF
	je_malloc_disable();
#endif
}

void malloc_enable(void)
{
#ifdef USE_JEMALLOC_DFX_INTF
	je_malloc_enable();
#endif
}

int malloc_iterate(void* base, size_t size, void (*callback)(void* base, size_t size, void* arg), void* arg)
{
#ifdef USE_JEMALLOC_DFX_INTF
	// je_iterate expects uintptr_t addresses; cast accordingly
	return je_iterate((uintptr_t)base, size,
		(void (*)(uintptr_t, size_t, void*))callback, arg);
#endif
	return 0;
}

ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count)
{
	// backtrace collection is not implemented; parameters are unused
	(void)pointer;
	(void)frames;
	(void)frame_count;
	return 0;
}