#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/prctl.h>	/* for the prctl(PR_SET_VMA, ...) call in malloc() below */
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "malloc_impl.h"

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

static struct {
	volatile uint64_t binmap;
	struct bin bins[64];
	volatile int free_lock[2];
} mal;

int __malloc_replaced;
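
/* Global allocator state: binmap has bit i set whenever bin i is
 * (possibly) non-empty; bins[] are 64 doubly-linked circular lists of
 * free chunks grouped by size class, each with its own lock; free_lock
 * serializes the final consolidation step in __bin_chunk. The
 * __malloc_replaced flag is nonzero when malloc has been replaced by an
 * interposer, in which case calloc below cannot rely on this
 * allocator's chunk layout. */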

/* Synchronization tools */

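/* Each lock is a pair of ints: lk[0] is the lock word (0 = unlocked,
 * 1 = held) and lk[1] is a waiter count maintained by __wait/__wake.
 * Locking is skipped while the process is single-threaded
 * (libc.threads_minus_1 == 0). lock_bin() additionally initializes the
 * bin's circular list on first use, pointing head and tail at the
 * bin's own sentinel, BIN_TO_CHUNK(i). */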
static inline void lock(volatile int *lk)
{
	if (libc.threads_minus_1)
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}

static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

static const unsigned char bin_tab[60] = {
	            32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
};
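
/* Mapping from chunk size to bin number. With x = size/SIZE_ALIGN - 1,
 * bins 0-32 each hold exactly one size class; larger sizes are grouped
 * geometrically through bin_tab (roughly four bins per doubling), and
 * bin 63 collects everything above the mmap threshold that still lives
 * in the heap. bin_index() gives the bin a free chunk of a given size
 * belongs in; bin_index_up() gives the smallest bin whose chunks are
 * all large enough to satisfy a request of that size. */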

static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	x--;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
}

#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif
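
/* Heap layout notes: every chunk carries two size words, psize (size of
 * the previous chunk) and csize (size of this chunk), with the low bit
 * (C_INUSE) set while the chunk is allocated. A chunk's csize is
 * mirrored in the next chunk's psize, which acts as a footer: free and
 * realloc compare the two to catch overflows, and coalescing uses psize
 * to reach the previous chunk. Zero-sized chunks marked C_INUSE serve
 * as sentinels at the boundaries of each contiguous heap region so that
 * merging never walks past the ends. */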

static struct chunk *expand_heap(size_t n)
{
	static int heap_lock[2];
	static void *end;
	void *p;
	struct chunk *w;

	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	n += SIZE_ALIGN;

	lock(heap_lock);

	p = __expand_heap(&n);
	if (!p) {
		unlock(heap_lock);
		return 0;
	}

	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
	if (p != end) {
		/* Valid/safe because of the prologue increment. */
		n -= SIZE_ALIGN;
		p = (char *)p + SIZE_ALIGN;
		w = MEM_TO_CHUNK(p);
		w->psize = 0 | C_INUSE;
	}

	/* Record new heap end and fill in footer. */
	end = (char *)p + n;
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w = MEM_TO_CHUNK(p);
	w->csize = n | C_INUSE;

	unlock(heap_lock);

	return w;
}

static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}
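
/* For example, on a 64-bit target with the usual definitions from
 * malloc_impl.h (SIZE_ALIGN = 4*sizeof(size_t) = 32, OVERHEAD =
 * 2*sizeof(size_t) = 16), a malloc(100) request becomes
 * n = (100 + 16 + 31) & -32 = 128, and bin_index_up(128) = 128/32 - 1
 * = 3, so the bin search in malloc starts at bin 3. */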

static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}
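
/* When c->prev == c->next, the chunk being removed is the only one in
 * its bin (both pointers refer to the bin's sentinel), so the binmap
 * bit is cleared. alloc_fwd/alloc_rev below try to claim the free
 * chunk after/before c by unbinning it; the size is re-checked under
 * the bin lock, and the loop retries if another thread changed the
 * chunk in the meantime. */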

static int alloc_fwd(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->csize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->csize == k) {
			unbin(c, i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

static int alloc_rev(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->psize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->psize == k) {
			unbin(PREV_CHUNK(c), i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}


/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk *self, size_t n, int i, int j)
{
	size_t n1;
	struct chunk *next, *split;

	/* We cannot pretrim if it would require re-binning. */
	if (j < 40) return 0;
	if (j < i+3) {
		if (j != 63) return 0;
		n1 = CHUNK_SIZE(self);
		if (n1-n <= MMAP_THRESHOLD) return 0;
	} else {
		n1 = CHUNK_SIZE(self);
	}
	if (bin_index(n1-n) != j) return 0;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->prev = self->prev;
	split->next = self->next;
	split->prev->next = split;
	split->next->prev = split;
	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
	self->csize = n | C_INUSE;
	return 1;
}
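
/* pretrim succeeds only when the remainder (n1-n) still maps to the
 * same bin j, so the split-off tail can simply take self's place in
 * the list that is already locked. trim below handles the general
 * case: it splits off the excess and hands it to __bin_chunk, which
 * re-bins (and possibly coalesces) it, unless the excess is too small
 * to bother with (DONTCARE). */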

static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;

	__bin_chunk(split);
}
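
/* malloc: requests above MMAP_THRESHOLD get their own anonymous
 * mapping; everything else is served from the bins. The binmap word is
 * scanned, starting at the ideal bin, for the first non-empty bin that
 * is large enough; if none exists, the heap is grown with expand_heap
 * and the new space is merged with a free chunk preceding it when
 * possible. Any excess is split off again by pretrim or trim. */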

void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;

		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, base, len, "native_heap:musl");

		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	for (;;) {
		uint64_t mask = mal.binmap & -(1ULL<<i);
		if (!mask) {
			c = expand_heap(n);
			if (!c) return 0;
			if (alloc_rev(c)) {
				struct chunk *x = c;
				c = PREV_CHUNK(c);
				NEXT_CHUNK(x)->psize = c->csize =
					x->csize + CHUNK_SIZE(c);
			}
			break;
		}
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			if (!pretrim(c, n, i, j)) unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}

	/* Now patch up in case we over-allocated */
	trim(c, n);

	return CHUNK_TO_MEM(c);
}
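
/* mal0_clear zeroes an allocation of length n while trying not to
 * touch pages that are already zero (e.g. fresh, never-written heap
 * pages). It works backwards from the end in page-sized steps, zeroing
 * the final partial page and any page found to contain nonzero data,
 * skipping pages that are already entirely zero, and returns the length
 * of the initial portion that the caller (calloc) must still clear. */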

static size_t mal0_clear(char *p, size_t pagesz, size_t n)
{
#ifdef __GNUC__
	typedef uint64_t __attribute__((__may_alias__)) T;
#else
	typedef unsigned char T;
#endif
	char *pp = p + n;
	size_t i = (uintptr_t)pp & (pagesz - 1);
	for (;;) {
		pp = memset(pp - i, 0, i);
		if (pp - p < pagesz) return pp - p;
		for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
			if (((T *)pp)[-1] | ((T *)pp)[-2])
				break;
	}
}

void *calloc(size_t m, size_t n)
{
	if (n && m > (size_t)-1/n) {
		errno = ENOMEM;
		return 0;
	}
	n *= m;
	void *p = malloc(n);
	if (!p) return p;
	if (!__malloc_replaced) {
		if (IS_MMAPPED(MEM_TO_CHUNK(p)))
			return p;
		if (n >= PAGE_SIZE)
			n = mal0_clear(p, PAGE_SIZE, n);
	}
	return memset(p, 0, n);
}
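
/* realloc: for mmap-served chunks the mapping is shrunk or grown with
 * mremap, falling back to malloc+copy when that fails or when a small
 * new size makes a heap chunk preferable. For heap chunks it first
 * tries to grow in place by absorbing the following free chunk; only
 * if that still is not enough does it allocate a new chunk, copy the
 * old contents, and free the original. */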

void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
			n0 = n;
			goto copy_free_ret;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			goto copy_realloc;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	/* Merge adjacent chunks if we need more space. This is not
	 * a waste of time even if we fail to get enough space, because our
	 * subsequent call to free would otherwise have to do the merge. */
	if (n > n1 && alloc_fwd(next)) {
		n1 += CHUNK_SIZE(next);
		next = NEXT_CHUNK(next);
	}
	/* FIXME: find what's wrong here and reenable it..? */
	if (0 && n > n1 && alloc_rev(self)) {
		self = PREV_CHUNK(self);
		n1 += CHUNK_SIZE(self);
	}
	self->csize = n1 | C_INUSE;
	next->psize = n1 | C_INUSE;

	/* If we got enough space, split off the excess and return */
	if (n <= n1) {
		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
		trim(self, n);
		return CHUNK_TO_MEM(self);
	}

copy_realloc:
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
copy_free_ret:
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}
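
/* __bin_chunk places a no-longer-used chunk on the appropriate free
 * list. It repeatedly merges the chunk with free neighbors on either
 * side until both neighbors are in use, takes the bin lock and
 * free_lock, publishes the merged size, links the chunk at the tail of
 * its bin, and sets the binmap bit. If enough contiguous free space
 * was created (RECLAIM), the interior pages of the chunk are returned
 * to the OS with madvise(MADV_DONTNEED). */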

void __bin_chunk(struct chunk *self)
{
	struct chunk *next = NEXT_CHUNK(self);
	size_t final_size, new_size, size;
	int reclaim=0;
	int i;

	final_size = new_size = CHUNK_SIZE(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	for (;;) {
		if (self->psize & next->csize & C_INUSE) {
			self->csize = final_size | C_INUSE;
			next->psize = final_size | C_INUSE;
			i = bin_index(final_size);
			lock_bin(i);
			lock(mal.free_lock);
			if (self->psize & next->csize & C_INUSE)
				break;
			unlock(mal.free_lock);
			unlock_bin(i);
		}

		if (alloc_rev(self)) {
			self = PREV_CHUNK(self);
			size = CHUNK_SIZE(self);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
		}

		if (alloc_fwd(next)) {
			size = CHUNK_SIZE(next);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
			next = NEXT_CHUNK(next);
		}
	}

	if (!(mal.binmap & 1ULL<<i))
		a_or_64(&mal.binmap, 1ULL<<i);

	self->csize = final_size;
	next->psize = final_size;
	unlock(mal.free_lock);

	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;

	/* Replace middle of large chunks with fresh zero pages */
	if (reclaim) {
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
		__madvise((void *)a, b-a, MADV_DONTNEED);
#else
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
	}

	unlock_bin(i);
}

static void unmap_chunk(struct chunk *self)
{
	size_t extra = self->psize;
	char *base = (char *)self - extra;
	size_t len = CHUNK_SIZE(self) + extra;
	/* Crash on double free */
	if (extra & 1) a_crash();
	__munmap(base, len);
}

void free(void *p)
{
	if (!p) return;

	struct chunk *self = MEM_TO_CHUNK(p);

	if (IS_MMAPPED(self))
		unmap_chunk(self);
	else
		__bin_chunk(self);
}
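
/* __malloc_donate turns an arbitrary byte range (for example, gaps the
 * dynamic linker reclaims from loaded segments) into a properly aligned
 * free chunk framed by in-use sentinel headers, and feeds it to
 * __bin_chunk. Ranges too small to hold a chunk after alignment are
 * silently ignored. */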

void __malloc_donate(char *start, char *end)
{
	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;

	/* Getting past this condition ensures that the padding for alignment
	 * and header overhead will not overflow and will leave a nonzero
	 * multiple of SIZE_ALIGN bytes between start and end. */
	if (end - start <= OVERHEAD + align_start_up + align_end_down)
		return;
	start += align_start_up + OVERHEAD;
	end -= align_end_down;

	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
	c->psize = n->csize = C_INUSE;
	c->csize = n->psize = C_INUSE | (end-start);
	__bin_chunk(c);
}