#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "malloc_impl.h"
#include <sys/prctl.h>

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

#ifdef HOOK_ENABLE
void *__libc_malloc(size_t);
void __libc_free(void *p);
#endif

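/* Global allocator state: binmap has bit i set whenever bin i is
 * (or may be) non-empty, bins[] are the doubly-linked free lists
 * indexed by size class, and free_lock serializes the final fixup
 * step of chunk coalescing in __bin_chunk. */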
static struct {
	volatile uint64_t binmap;
	struct bin bins[64];
	volatile int free_lock[2];
} mal;

int __malloc_replaced;

/* Synchronization tools */

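/* Two-word lock: lk[0] is the lock word proper and lk[1] counts
 * waiters, so the unlock path can skip the futex wake when nobody is
 * blocked. Single-threaded processes skip locking entirely. */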
static inline void lock(volatile int *lk)
{
	if (libc.threads_minus_1)
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

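/* Bins are initialized lazily: the first lock_bin on a bin points its
 * head and tail at the bin's own sentinel (BIN_TO_CHUNK(i)), forming
 * an empty circular list. */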
static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}

static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

static const unsigned char bin_tab[60] = {
	            32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
};

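/* Size-class mapping: after conversion to SIZE_ALIGN units (minus
 * one), sizes up to 32 units index their own exact bin; larger sizes
 * map through bin_tab in two tiers of widening ranges; anything above
 * 0x1c00 units lands in bin 63. A hedged worked example, assuming
 * SIZE_ALIGN is 32 bytes (4*sizeof(size_t) on a 64-bit target):
 *
 *	bin_index(64)   -> 64/32 - 1 = 1             exact bin 1
 *	bin_index(2048) -> x = 63, bin_tab[63/8-4] = 35
 *
 * bin_index_up rounds up instead of down, so malloc starts its search
 * in a bin whose chunks are guaranteed large enough. */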
static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	x--;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
}

#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

static struct chunk *expand_heap(size_t n)
{
	static int heap_lock[2];
	static void *end;
	void *p;
	struct chunk *w;

	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	n += SIZE_ALIGN;

	lock(heap_lock);

	p = __expand_heap(&n);
	if (!p) {
		unlock(heap_lock);
		return 0;
	}

	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
	if (p != end) {
		/* Valid/safe because of the prologue increment. */
		n -= SIZE_ALIGN;
		p = (char *)p + SIZE_ALIGN;
		w = MEM_TO_CHUNK(p);
		w->psize = 0 | C_INUSE;
	}

	/* Record new heap end and fill in footer. */
	end = (char *)p + n;
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w = MEM_TO_CHUNK(p);
	w->csize = n | C_INUSE;

	unlock(heap_lock);

	return w;
}

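/* Clamp and round a request: sizes too close to PTRDIFF_MAX are
 * rejected with ENOMEM so later pointer arithmetic cannot overflow,
 * a zero-byte request is bumped to the minimum chunk, and everything
 * else is rounded up to a SIZE_ALIGN multiple including OVERHEAD for
 * the chunk header. */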
static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}

static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

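/* alloc_fwd/alloc_rev opportunistically take ownership of the free
 * neighbor after/before c. The size field is read without the bin
 * lock, then re-checked under the lock; if another thread raced and
 * changed it, the loop retries with the new value. */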
static int alloc_fwd(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->csize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->csize == k) {
			unbin(c, i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

static int alloc_rev(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->psize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->psize == k) {
			unbin(PREV_CHUNK(c), i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

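/* The restrictions below keep pretrim cheap: it is attempted only for
 * the coarse bins (j >= 40) where a range of sizes shares one bin,
 * only when the free chunk's bin is well above the ideal bin for the
 * request (j >= i+3), and only when the remainder n1-n provably maps
 * back to bin j, so the split-off tail can replace self in the same
 * list without re-binning. Bin 63 is unbounded, so it additionally
 * requires the remainder to exceed MMAP_THRESHOLD. */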
/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk *self, size_t n, int i, int j)
{
	size_t n1;
	struct chunk *next, *split;

	/* We cannot pretrim if it would require re-binning. */
	if (j < 40) return 0;
	if (j < i+3) {
		if (j != 63) return 0;
		n1 = CHUNK_SIZE(self);
		if (n1-n <= MMAP_THRESHOLD) return 0;
	} else {
		n1 = CHUNK_SIZE(self);
	}
	if (bin_index(n1-n) != j) return 0;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->prev = self->prev;
	split->next = self->next;
	split->prev->next = split;
	split->next->prev = split;
	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
	self->csize = n | C_INUSE;
	return 1;
}

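/* Split an in-use chunk at size n and return the tail to the free
 * lists, but only when the excess exceeds DONTCARE; tiny tails are
 * left attached rather than creating useless fragments. */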
static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;

	__bin_chunk(split);
}

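/* Requests above MMAP_THRESHOLD get a dedicated anonymous mapping
 * (named "native_heap:musl" for tooling via prctl). Smaller requests
 * search binmap for the first non-empty bin at or above the ideal
 * size class; when none exists, the heap is expanded and the new
 * space merged with a free chunk preceding it, if any. */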
#ifdef HOOK_ENABLE
void *__libc_malloc(size_t n)
#else
void *malloc(size_t n)
#endif
{
	struct chunk *c;
	int i, j;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;

		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, base, len, "native_heap:musl");

		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	for (;;) {
		uint64_t mask = mal.binmap & -(1ULL<<i);
		if (!mask) {
			c = expand_heap(n);
			if (!c) return 0;
			if (alloc_rev(c)) {
				struct chunk *x = c;
				c = PREV_CHUNK(c);
				NEXT_CHUNK(x)->psize = c->csize =
					x->csize + CHUNK_SIZE(c);
			}
			break;
		}
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			if (!pretrim(c, n, i, j)) unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}

	/* Now patch up in case we over-allocated */
	trim(c, n);

	return CHUNK_TO_MEM(c);
}

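/* Zero a fresh chunk without dirtying pages that are already clean.
 * The block is scanned backwards a page at a time; when a page's
 * trailing words are found to be nonzero, the rest of that page is
 * memset without further checking, while pages that scan as all-zero
 * (typically never-touched heap pages backed by the kernel zero page)
 * are never written. The return value is the sub-page prefix length
 * the caller must still clear. */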
static size_t mal0_clear(char *p, size_t pagesz, size_t n)
{
#ifdef __GNUC__
	typedef uint64_t __attribute__((__may_alias__)) T;
#else
	typedef unsigned char T;
#endif
	char *pp = p + n;
	size_t i = (uintptr_t)pp & (pagesz - 1);
	for (;;) {
		pp = memset(pp - i, 0, i);
		if (pp - p < pagesz) return pp - p;
		for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
			if (((T *)pp)[-1] | ((T *)pp)[-2])
				break;
	}
}

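/* calloc guards the m*n multiplication by checking m against
 * (size_t)-1/n: if m > SIZE_MAX/n, the product would wrap. When the
 * allocator has been replaced by an interposer, the chunk-header
 * inspection below is invalid, so the buffer is simply memset in
 * full. */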
void *calloc(size_t m, size_t n)
{
	if (n && m > (size_t)-1/n) {
		errno = ENOMEM;
		return 0;
	}
	n *= m;
	void *p = malloc(n);
	if (!p) return p;
	if (!__malloc_replaced) {
		if (IS_MMAPPED(MEM_TO_CHUNK(p)))
			return p;
		if (n >= PAGE_SIZE)
			n = mal0_clear(p, PAGE_SIZE, n);
	}
	return memset(p, 0, n);
}

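/* realloc tries, in order: moving a shrinking mmapped chunk back into
 * the normal heap, mremap for mmapped chunks, in-place growth by
 * merging the free neighbor after the chunk, and finally a fresh
 * allocation plus memcpy. */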
void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);
	if (!n) {
		free(p);
		return NULL;
	}

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
			n0 = n;
			goto copy_free_ret;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			goto copy_realloc;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	/* Merge adjacent chunks if we need more space. This is not
	 * a waste of time even if we fail to get enough space, because our
	 * subsequent call to free would otherwise have to do the merge. */
	if (n > n1 && alloc_fwd(next)) {
		n1 += CHUNK_SIZE(next);
		next = NEXT_CHUNK(next);
	}
	/* FIXME: find what's wrong here and reenable it..? */
	if (0 && n > n1 && alloc_rev(self)) {
		self = PREV_CHUNK(self);
		n1 += CHUNK_SIZE(self);
	}
	self->csize = n1 | C_INUSE;
	next->psize = n1 | C_INUSE;

	/* If we got enough space, split off the excess and return */
	if (n <= n1) {
		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
		trim(self, n);
		return CHUNK_TO_MEM(self);
	}

copy_realloc:
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
copy_free_ret:
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}

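/* Free path: coalesce with free neighbors in both directions, then
 * insert the merged chunk at the tail of its bin. The retry loop
 * re-checks the neighbors' in-use bits under free_lock, since either
 * neighbor may be freed concurrently between the unlocked test and
 * the lock acquisition. */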
void __bin_chunk(struct chunk *self)
{
	struct chunk *next = NEXT_CHUNK(self);
	size_t final_size, new_size, size;
	int reclaim=0;
	int i;

	final_size = new_size = CHUNK_SIZE(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	for (;;) {
		if (self->psize & next->csize & C_INUSE) {
			self->csize = final_size | C_INUSE;
			next->psize = final_size | C_INUSE;
			i = bin_index(final_size);
			lock_bin(i);
			lock(mal.free_lock);
			if (self->psize & next->csize & C_INUSE)
				break;
			unlock(mal.free_lock);
			unlock_bin(i);
		}

		if (alloc_rev(self)) {
			self = PREV_CHUNK(self);
			size = CHUNK_SIZE(self);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
		}

		if (alloc_fwd(next)) {
			size = CHUNK_SIZE(next);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
			next = NEXT_CHUNK(next);
		}
	}

	if (!(mal.binmap & 1ULL<<i))
		a_or_64(&mal.binmap, 1ULL<<i);

	self->csize = final_size;
	next->psize = final_size;
	unlock(mal.free_lock);

	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;

	/* Replace middle of large chunks with fresh zero pages */
	if (reclaim) {
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
		__madvise((void *)a, b-a, MADV_DONTNEED);
#else
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
	}

	unlock_bin(i);
}

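/* For mmapped chunks, psize stores the distance from the mapping base
 * to the chunk header rather than a previous-chunk size. A valid
 * offset is always even; an odd value means the header was clobbered
 * or the chunk was already freed and its memory reused, so a_crash()
 * fires deliberately rather than unmapping a bogus range. */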
static void unmap_chunk(struct chunk *self)
{
	size_t extra = self->psize;
	char *base = (char *)self - extra;
	size_t len = CHUNK_SIZE(self) + extra;
	/* Crash on double free */
	if (extra & 1) a_crash();
	__munmap(base, len);
}

#ifdef HOOK_ENABLE
void __libc_free(void *p)
#else
void free(void *p)
#endif
{
	if (!p) return;

	struct chunk *self = MEM_TO_CHUNK(p);

	if (IS_MMAPPED(self))
		unmap_chunk(self);
	else
		__bin_chunk(self);
}

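/* __malloc_donate feeds an arbitrary [start,end) byte range into the
 * allocator as a single free chunk, bracketed by fake in-use headers
 * so it can never be coalesced past its edges. In musl it is called
 * by the dynamic linker to donate slack space in loaded ELF images;
 * a hedged sketch of such a call (the range variables here are
 * hypothetical, not names from this codebase):
 *
 *	char *unused_start = ..., *unused_end = ...;
 *	__malloc_donate(unused_start, unused_end);
 *
 * Ranges too small to hold an aligned chunk plus overhead are simply
 * ignored. */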
void __malloc_donate(char *start, char *end)
{
	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;

	/* Getting past this condition ensures that the padding for alignment
	 * and header overhead will not overflow and will leave a nonzero
	 * multiple of SIZE_ALIGN bytes between start and end. */
	if (end - start <= OVERHEAD + align_start_up + align_end_down)
		return;
	start += align_start_up + OVERHEAD;
	end -= align_end_down;

	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
	c->psize = n->csize = C_INUSE;
	c->csize = n->psize = C_INUSE | (end-start);
	__bin_chunk(c);
}