--- /home/chris/oh_patchlibc_0803/openharmony/third_party/musl/src/malloc/oldmalloc/malloc.c	2023-08-07 15:19:23.270403360 +0800
+++ topatch/src/malloc/oldmalloc/malloc.c	2023-08-07 16:24:49.177114612 +0800
@@ -1,3 +1,15 @@
+/*
+ * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
+ * Licensed under the Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *     http://license.coscl.org.cn/MulanPSL2
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
+ * PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
 #define _GNU_SOURCE
 #include <stdlib.h>
 #include <string.h>
@@ -7,105 +19,118 @@
 #include <sys/mman.h>
 #include "libc.h"
 #include "atomic.h"
-#include "pthread_impl.h"
+//#include "pthread_impl.h"
+#include "syscall.h"
 #include "malloc_impl.h"
 #include "fork_impl.h"
 
-#define malloc __libc_malloc_impl
+#define malloc  __libc_malloc_impl
 #define realloc __libc_realloc
-#define free __libc_free
+#define free    __libc_free
 
 #if defined(__GNUC__) && defined(__PIC__)
 #define inline inline __attribute__((always_inline))
 #endif
 
 static struct {
-	volatile uint64_t binmap;
-	struct bin bins[64];
-	volatile int split_merge_lock[2];
+        volatile uint64_t binmap;
+        struct bin bins[64];
+        volatile int split_merge_lock[2];
 } mal;
 
 /* Synchronization tools */
 
+#include <debug_lock.h>
+
 static inline void lock(volatile int *lk)
 {
+        chcore_spin_lock(lk);
+#if 0
 	int need_locks = libc.need_locks;
 	if (need_locks) {
 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
 		if (need_locks < 0) libc.need_locks = 0;
 	}
+#endif
 }
 
 static inline void unlock(volatile int *lk)
 {
+        chcore_spin_unlock(lk);
+#if 0
 	if (lk[0]) {
 		a_store(lk, 0);
 		if (lk[1]) __wake(lk, 1, 1);
 	}
+#endif
 }
 
 static inline void lock_bin(int i)
 {
-	lock(mal.bins[i].lock);
-	if (!mal.bins[i].head)
-		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
+        lock(mal.bins[i].lock);
+        if (!mal.bins[i].head)
+                mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
 }
 
 static inline void unlock_bin(int i)
 {
-	unlock(mal.bins[i].lock);
+        unlock(mal.bins[i].lock);
 }
 
 static int first_set(uint64_t x)
 {
 #if 1
-	return a_ctz_64(x);
+        return a_ctz_64(x);
 #else
-	static const char debruijn64[64] = {
-		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
-		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
-		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
-		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
-	};
-	static const char debruijn32[32] = {
-		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
-		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
-	};
-	if (sizeof(long) < 8) {
-		uint32_t y = x;
-		if (!y) {
-			y = x>>32;
-			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
-		}
-		return debruijn32[(y&-y)*0x076be629 >> 27];
-	}
-	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
+        static const char debruijn64[64] = {
+                0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
+                62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
+                63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
+                51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12};
+        static const char debruijn32[32] = {0, 1, 23, 2, 29, 24, 19, 3,
+                                            30, 27, 25, 11, 20, 8, 4, 13,
+                                            31, 22, 28, 18, 26, 10, 7, 12,
+                                            21, 17, 9, 6, 16, 5, 15, 14};
+        if (sizeof(long) < 8) {
+                uint32_t y = x;
+                if (!y) {
+                        y = x >> 32;
+                        return 32 + debruijn32[(y & -y) * 0x076be629 >> 27];
+                }
+                return debruijn32[(y & -y) * 0x076be629 >> 27];
+        }
+        return debruijn64[(x & -x) * 0x022fdd63cc95386dull >> 58];
 #endif
 }
 
 static const unsigned char bin_tab[60] = {
-	32,33,34,35,36,36,37,37,38,38,39,39,
-	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
-	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
-	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
+        32, 33, 34, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 40,
+        40, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 43, 44, 44,
+        44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 46,
+        46, 46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 47, 47,
 };
 
 static int bin_index(size_t x)
 {
-	x = x / SIZE_ALIGN - 1;
-	if (x <= 32) return x;
-	if (x < 512) return bin_tab[x/8-4];
-	if (x > 0x1c00) return 63;
-	return bin_tab[x/128-4] + 16;
+        x = x / SIZE_ALIGN - 1;
+        if (x <= 32)
+                return x;
+        if (x < 512)
+                return bin_tab[x / 8 - 4];
+        if (x > 0x1c00)
+                return 63;
+        return bin_tab[x / 128 - 4] + 16;
}
 
 static int bin_index_up(size_t x)
 {
-	x = x / SIZE_ALIGN - 1;
-	if (x <= 32) return x;
-	x--;
-	if (x < 512) return bin_tab[x/8-4] + 1;
-	return bin_tab[x/128-4] + 17;
+        x = x / SIZE_ALIGN - 1;
+        if (x <= 32)
+                return x;
+        x--;
+        if (x < 512)
+                return bin_tab[x / 8 - 4] + 1;
+        return bin_tab[x / 128 - 4] + 17;
 }
 
 #if 0
@@ -137,18 +162,20 @@
 
 static int traverses_stack_p(uintptr_t old, uintptr_t new)
 {
-	const uintptr_t len = 8<<20;
-	uintptr_t a, b;
+        const uintptr_t len = 8 << 20;
+        uintptr_t a, b;
 
-	b = (uintptr_t)libc.auxv;
-	a = b > len ? b-len : 0;
-	if (new>a && old<b) return 1;
-
-	b = (uintptr_t)&b;
-	a = b > len ? b-len : 0;
-	if (new>a && old<b) return 1;
+        b = (uintptr_t)libc.auxv;
+        a = b > len ? b - len : 0;
+        if (new > a && old < b)
+                return 1;
+
+        b = (uintptr_t)&b;
+        a = b > len ? b - len : 0;
+        if (new > a && old < b)
+                return 1;
 
-	return 0;
+        return 0;
 }
 
 /* Expand the heap in-place if brk can be used, or otherwise via mmap,
@@ -161,396 +188,416 @@
 
 static void *__expand_heap(size_t *pn)
 {
-	static uintptr_t brk;
-	static unsigned mmap_step;
-	size_t n = *pn;
-
-	if (n > SIZE_MAX/2 - PAGE_SIZE) {
-		errno = ENOMEM;
-		return 0;
-	}
-	n += -n & PAGE_SIZE-1;
-
-	if (!brk) {
-		brk = __syscall(SYS_brk, 0);
-		brk += -brk & PAGE_SIZE-1;
-	}
-
-	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
-	    && __syscall(SYS_brk, brk+n)==brk+n) {
-		*pn = n;
-		brk += n;
-		return (void *)(brk-n);
-	}
-
-	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
-	if (n < min) n = min;
-	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
-		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-	if (area == MAP_FAILED) return 0;
-	*pn = n;
-	mmap_step++;
-	return area;
+        static uintptr_t brk;
+        static unsigned mmap_step;
+        size_t n = *pn;
+
+        if (n > SIZE_MAX / 2 - PAGE_SIZE) {
+                errno = ENOMEM;
+                return 0;
+        }
+        n += -n & PAGE_SIZE - 1;
+
+        if (!brk) {
+                brk = __syscall(SYS_brk, 0);
+                brk += -brk & PAGE_SIZE - 1;
+        }
+
+        if (n < SIZE_MAX - brk && !traverses_stack_p(brk, brk + n)
+            && __syscall(SYS_brk, brk + n) == brk + n) {
+                *pn = n;
+                brk += n;
+                return (void *)(brk - n);
+        }
+
+        size_t min = (size_t)PAGE_SIZE << mmap_step / 2;
+        if (n < min)
+                n = min;
+        void *area = __mmap(
+                0, n, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+        if (area == MAP_FAILED)
+                return 0;
+        *pn = n;
+        mmap_step++;
+        return area;
 }
 
 static struct chunk *expand_heap(size_t n)
 {
-	static void *end;
-	void *p;
-	struct chunk *w;
-
-	/* The argument n already accounts for the caller's chunk
-	 * overhead needs, but if the heap can't be extended in-place,
-	 * we need room for an extra zero-sized sentinel chunk. */
-	n += SIZE_ALIGN;
-
-	p = __expand_heap(&n);
-	if (!p) return 0;
-
-	/* If not just expanding existing space, we need to make a
-	 * new sentinel chunk below the allocated space. */
-	if (p != end) {
-		/* Valid/safe because of the prologue increment. */
-		n -= SIZE_ALIGN;
-		p = (char *)p + SIZE_ALIGN;
-		w = MEM_TO_CHUNK(p);
-		w->psize = 0 | C_INUSE;
-	}
+        static void *end;
+        void *p;
+        struct chunk *w;
+
+        /* The argument n already accounts for the caller's chunk
+         * overhead needs, but if the heap can't be extended in-place,
+         * we need room for an extra zero-sized sentinel chunk. */
+        n += SIZE_ALIGN;
+
+        p = __expand_heap(&n);
+        if (!p)
+                return 0;
+
+        /* If not just expanding existing space, we need to make a
+         * new sentinel chunk below the allocated space. */
+        if (p != end) {
+                /* Valid/safe because of the prologue increment. */
+                n -= SIZE_ALIGN;
+                p = (char *)p + SIZE_ALIGN;
+                w = MEM_TO_CHUNK(p);
+                w->psize = 0 | C_INUSE;
+        }
+
+        /* Record new heap end and fill in footer. */
+        end = (char *)p + n;
+        w = MEM_TO_CHUNK(end);
+        w->psize = n | C_INUSE;
+        w->csize = 0 | C_INUSE;
+
+        /* Fill in header, which may be new or may be replacing a
+         * zero-size sentinel header at the old end-of-heap. */
+        w = MEM_TO_CHUNK(p);
+        w->csize = n | C_INUSE;
 
-	/* Record new heap end and fill in footer. */
-	end = (char *)p + n;
-	w = MEM_TO_CHUNK(end);
-	w->psize = n | C_INUSE;
-	w->csize = 0 | C_INUSE;
-
-	/* Fill in header, which may be new or may be replacing a
-	 * zero-size sentinel header at the old end-of-heap. */
-	w = MEM_TO_CHUNK(p);
-	w->csize = n | C_INUSE;
-
-	return w;
+        return w;
 }
 
 static int adjust_size(size_t *n)
 {
-	/* Result of pointer difference must fit in ptrdiff_t. */
-	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
-		if (*n) {
-			errno = ENOMEM;
-			return -1;
-		} else {
-			*n = SIZE_ALIGN;
-			return 0;
-		}
-	}
-	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
-	return 0;
+        /* Result of pointer difference must fit in ptrdiff_t. */
+        if (*n - 1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
+                if (*n) {
+                        errno = ENOMEM;
+                        return -1;
+                } else {
+                        *n = SIZE_ALIGN;
+                        return 0;
+                }
+        }
+        *n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
+        return 0;
 }
 
 static void unbin(struct chunk *c, int i)
 {
-	if (c->prev == c->next)
-		a_and_64(&mal.binmap, ~(1ULL<<i));
-	c->prev->next = c->next;
-	c->next->prev = c->prev;
-	c->csize |= C_INUSE;
-	NEXT_CHUNK(c)->psize |= C_INUSE;
+        if (c->prev == c->next)
+                a_and_64(&mal.binmap, ~(1ULL << i));
+        c->prev->next = c->next;
+        c->next->prev = c->prev;
+        c->csize |= C_INUSE;
+        NEXT_CHUNK(c)->psize |= C_INUSE;
 }
 
 static void bin_chunk(struct chunk *self, int i)
 {
-	self->next = BIN_TO_CHUNK(i);
-	self->prev = mal.bins[i].tail;
-	self->next->prev = self;
-	self->prev->next = self;
-	if (self->prev == BIN_TO_CHUNK(i))
-		a_or_64(&mal.binmap, 1ULL<<i);
+        self->next = BIN_TO_CHUNK(i);
+        self->prev = mal.bins[i].tail;
+        self->next->prev = self;
+        self->prev->next = self;
+        if (self->prev == BIN_TO_CHUNK(i))
+                a_or_64(&mal.binmap, 1ULL << i);
 }
 
 static void trim(struct chunk *self, size_t n)
 {
-	size_t n1 = CHUNK_SIZE(self);
-	struct chunk *next, *split;
+        size_t n1 = CHUNK_SIZE(self);
+        struct chunk *next, *split;
 
-	if (n >= n1 - DONTCARE) return;
+        if (n >= n1 - DONTCARE)
+                return;
 
-	next = NEXT_CHUNK(self);
-	split = (void *)((char *)self + n);
+        next = NEXT_CHUNK(self);
+        split = (void *)((char *)self + n);
 
-	split->psize = n | C_INUSE;
-	split->csize = n1-n;
-	next->psize = n1-n;
-	self->csize = n | C_INUSE;
+        split->psize = n | C_INUSE;
+        split->csize = n1 - n;
+        next->psize = n1 - n;
+        self->csize = n | C_INUSE;
 
-	int i = bin_index(n1-n);
-	lock_bin(i);
+        int i = bin_index(n1 - n);
+        lock_bin(i);
 
-	bin_chunk(split, i);
+        bin_chunk(split, i);
 
-	unlock_bin(i);
+        unlock_bin(i);
 }
 
 void *malloc(size_t n)
 {
-	struct chunk *c;
-	int i, j;
-	uint64_t mask;
-
-	if (adjust_size(&n) < 0) return 0;
-
-	if (n > MMAP_THRESHOLD) {
-		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
-		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
-			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-		if (base == (void *)-1) return 0;
-		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
-		c->csize = len - (SIZE_ALIGN - OVERHEAD);
-		c->psize = SIZE_ALIGN - OVERHEAD;
-		return CHUNK_TO_MEM(c);
-	}
-
-	i = bin_index_up(n);
-	if (i<63 && (mal.binmap & (1ULL<<i))) {
-		lock_bin(i);
-		c = mal.bins[i].head;
-		if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c)-n <= DONTCARE) {
-			unbin(c, i);
-			unlock_bin(i);
-			return CHUNK_TO_MEM(c);
-		}
-		unlock_bin(i);
-	}
-	lock(mal.split_merge_lock);
-	for (mask = mal.binmap & -(1ULL<<i); mask; mask -= (mask&-mask)) {
-		j = first_set(mask);
-		lock_bin(j);
-		c = mal.bins[j].head;
-		if (c != BIN_TO_CHUNK(j)) {
-			unbin(c, j);
-			unlock_bin(j);
-			break;
-		}
-		unlock_bin(j);
-	}
-	if (!mask) {
-		c = expand_heap(n);
-		if (!c) {
-			unlock(mal.split_merge_lock);
-			return 0;
-		}
-	}
-	trim(c, n);
-	unlock(mal.split_merge_lock);
-	return CHUNK_TO_MEM(c);
+        struct chunk *c;
+        int i, j;
+        uint64_t mask;
+
+        if (adjust_size(&n) < 0)
+                return 0;
+
+        if (n > MMAP_THRESHOLD) {
+                size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
+                char *base = __mmap(
+                        0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+                if (base == (void *)-1)
+                        return 0;
+                c = (void *)(base + SIZE_ALIGN - OVERHEAD);
+                c->csize = len - (SIZE_ALIGN - OVERHEAD);
+                c->psize = SIZE_ALIGN - OVERHEAD;
+                return CHUNK_TO_MEM(c);
+        }
+
+        i = bin_index_up(n);
+        if (i < 63 && (mal.binmap & (1ULL << i))) {
+                lock_bin(i);
+                c = mal.bins[i].head;
+                if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c) - n <= DONTCARE) {
+                        unbin(c, i);
+                        unlock_bin(i);
+                        return CHUNK_TO_MEM(c);
+                }
+                unlock_bin(i);
+        }
+        lock(mal.split_merge_lock);
+        for (mask = mal.binmap & -(1ULL << i); mask; mask -= (mask & -mask)) {
+                j = first_set(mask);
+                lock_bin(j);
+                c = mal.bins[j].head;
+                if (c != BIN_TO_CHUNK(j)) {
+                        unbin(c, j);
+                        unlock_bin(j);
+                        break;
+                }
+                unlock_bin(j);
+        }
+        if (!mask) {
+                c = expand_heap(n);
+                if (!c) {
+                        unlock(mal.split_merge_lock);
+                        return 0;
+                }
+        }
+        trim(c, n);
+        unlock(mal.split_merge_lock);
+        return CHUNK_TO_MEM(c);
 }
 
 int __malloc_allzerop(void *p)
 {
-	return IS_MMAPPED(MEM_TO_CHUNK(p));
+        return IS_MMAPPED(MEM_TO_CHUNK(p));
 }
 
 void *realloc(void *p, size_t n)
 {
-	struct chunk *self, *next;
-	size_t n0, n1;
-	void *new;
-
-	if (!p) return malloc(n);
-
-	if (adjust_size(&n) < 0) return 0;
-
-	self = MEM_TO_CHUNK(p);
-	n1 = n0 = CHUNK_SIZE(self);
-
-	if (n<=n0 && n0-n<=DONTCARE) return p;
-
-	if (IS_MMAPPED(self)) {
-		size_t extra = self->psize;
-		char *base = (char *)self - extra;
-		size_t oldlen = n0 + extra;
-		size_t newlen = n + extra;
-		/* Crash on realloc of freed chunk */
-		if (extra & 1) a_crash();
-		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
-			n0 = n;
-			goto copy_free_ret;
-		}
-		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
-		if (oldlen == newlen) return p;
-		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
-		if (base == (void *)-1)
-			goto copy_realloc;
-		self = (void *)(base + extra);
-		self->csize = newlen - extra;
-		return CHUNK_TO_MEM(self);
-	}
-
-	next = NEXT_CHUNK(self);
-
-	/* Crash on corrupted footer (likely from buffer overflow) */
-	if (next->psize != self->csize) a_crash();
-
-	if (n < n0) {
-		int i = bin_index_up(n);
-		int j = bin_index(n0);
-		if (i<j && (mal.binmap & (1ULL << i)))
-			goto copy_realloc;
-		struct chunk *split = (void *)((char *)self + n);
-		self->csize = split->psize = n | C_INUSE;
-		split->csize = next->psize = n0-n | C_INUSE;
-		__bin_chunk(split);
-		return CHUNK_TO_MEM(self);
-	}
-
-	lock(mal.split_merge_lock);
-
-	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
-	if (n0+nsize >= n) {
-		int i = bin_index(nsize);
-		lock_bin(i);
-		if (!(next->csize & C_INUSE)) {
-			unbin(next, i);
-			unlock_bin(i);
-			next = NEXT_CHUNK(next);
-			self->csize = next->psize = n0+nsize | C_INUSE;
-			trim(self, n);
-			unlock(mal.split_merge_lock);
-			return CHUNK_TO_MEM(self);
-		}
-		unlock_bin(i);
-	}
-	unlock(mal.split_merge_lock);
+        struct chunk *self, *next;
+        size_t n0, n1;
+        void *new;
+
+        if (!p)
+                return malloc(n);
+
+        if (adjust_size(&n) < 0)
+                return 0;
+
+        self = MEM_TO_CHUNK(p);
+        n1 = n0 = CHUNK_SIZE(self);
+
+        if (n <= n0 && n0 - n <= DONTCARE)
+                return p;
+
+        if (IS_MMAPPED(self)) {
+                size_t extra = self->psize;
+                char *base = (char *)self - extra;
+                size_t oldlen = n0 + extra;
+                size_t newlen = n + extra;
+                /* Crash on realloc of freed chunk */
+                if (extra & 1)
+                        a_crash();
+                if (newlen < PAGE_SIZE && (new = malloc(n - OVERHEAD))) {
+                        n0 = n;
+                        goto copy_free_ret;
+                }
+                newlen = (newlen + PAGE_SIZE - 1) & -PAGE_SIZE;
+                if (oldlen == newlen)
+                        return p;
+                base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
+                if (base == (void *)-1)
+                        goto copy_realloc;
+                self = (void *)(base + extra);
+                self->csize = newlen - extra;
+                return CHUNK_TO_MEM(self);
+        }
+
+        next = NEXT_CHUNK(self);
+
+        /* Crash on corrupted footer (likely from buffer overflow) */
+        if (next->psize != self->csize)
+                a_crash();
+
+        if (n < n0) {
+                int i = bin_index_up(n);
+                int j = bin_index(n0);
+                if (i < j && (mal.binmap & (1ULL << i)))
+                        goto copy_realloc;
+                struct chunk *split = (void *)((char *)self + n);
+                self->csize = split->psize = n | C_INUSE;
+                split->csize = next->psize = n0 - n | C_INUSE;
+                __bin_chunk(split);
+                return CHUNK_TO_MEM(self);
+        }
+
+        lock(mal.split_merge_lock);
+
+        size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
+        if (n0 + nsize >= n) {
+                int i = bin_index(nsize);
+                lock_bin(i);
+                if (!(next->csize & C_INUSE)) {
+                        unbin(next, i);
+                        unlock_bin(i);
+                        next = NEXT_CHUNK(next);
+                        self->csize = next->psize = n0 + nsize | C_INUSE;
+                        trim(self, n);
+                        unlock(mal.split_merge_lock);
+                        return CHUNK_TO_MEM(self);
+                }
+                unlock_bin(i);
+        }
+        unlock(mal.split_merge_lock);
 
 copy_realloc:
-	/* As a last resort, allocate a new chunk and copy to it. */
-	new = malloc(n-OVERHEAD);
-	if (!new) return 0;
+        /* As a last resort, allocate a new chunk and copy to it. */
+        new = malloc(n - OVERHEAD);
+        if (!new)
+                return 0;
copy_free_ret:
-	memcpy(new, p, (n<n0 ? n : n0) - OVERHEAD);
-	free(CHUNK_TO_MEM(self));
-	return new;
+        memcpy(new, p, (n < n0 ? n : n0) - OVERHEAD);
+        free(CHUNK_TO_MEM(self));
+        return new;
 }
 
 void __bin_chunk(struct chunk *self)
 {
-	struct chunk *next = NEXT_CHUNK(self);
-
-	/* Crash on corrupted footer (likely from buffer overflow) */
-	if (next->psize != self->csize) a_crash();
-
-	lock(mal.split_merge_lock);
-
-	size_t osize = CHUNK_SIZE(self), size = osize;
-
-	/* Since we hold split_merge_lock, only transition from free to
-	 * in-use can race; in-use to free is impossible */
-	size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
-	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
-
-	if (psize) {
-		int i = bin_index(psize);
-		lock_bin(i);
-		if (!(self->psize & C_INUSE)) {
-			struct chunk *prev = PREV_CHUNK(self);
-			unbin(prev, i);
-			self = prev;
-			size += psize;
-		}
-		unlock_bin(i);
-	}
-	if (nsize) {
-		int i = bin_index(nsize);
-		lock_bin(i);
-		if (!(next->csize & C_INUSE)) {
-			unbin(next, i);
-			next = NEXT_CHUNK(next);
-			size += nsize;
-		}
-		unlock_bin(i);
-	}
-
-	int i = bin_index(size);
-	lock_bin(i);
+        struct chunk *next = NEXT_CHUNK(self);
 
-	self->csize = size;
-	next->psize = size;
-	bin_chunk(self, i);
-	unlock(mal.split_merge_lock);
-
-	/* Replace middle of large chunks with fresh zero pages */
-	if (size > RECLAIM && (size^(size-osize)) > size-osize) {
-		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
-		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
-		int e = errno;
+        /* Crash on corrupted footer (likely from buffer overflow) */
+        if (next->psize != self->csize)
+                a_crash();
+
+        lock(mal.split_merge_lock);
+
+        size_t osize = CHUNK_SIZE(self), size = osize;
+
+        /* Since we hold split_merge_lock, only transition from free to
+         * in-use can race; in-use to free is impossible */
+        size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
+        size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
+
+        if (psize) {
+                int i = bin_index(psize);
+                lock_bin(i);
+                if (!(self->psize & C_INUSE)) {
+                        struct chunk *prev = PREV_CHUNK(self);
+                        unbin(prev, i);
+                        self = prev;
+                        size += psize;
+                }
+                unlock_bin(i);
+        }
+        if (nsize) {
+                int i = bin_index(nsize);
+                lock_bin(i);
+                if (!(next->csize & C_INUSE)) {
+                        unbin(next, i);
+                        next = NEXT_CHUNK(next);
+                        size += nsize;
+                }
+                unlock_bin(i);
+        }
+
+        int i = bin_index(size);
+        lock_bin(i);
+
+        self->csize = size;
+        next->psize = size;
+        bin_chunk(self, i);
+        unlock(mal.split_merge_lock);
+
+        /* Replace middle of large chunks with fresh zero pages */
+        if (size > RECLAIM && (size ^ (size - osize)) > size - osize) {
+                uintptr_t a = (uintptr_t)self + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
+                uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
+                int e = errno;
 #if 1
-		__madvise((void *)a, b-a, MADV_DONTNEED);
+                __madvise((void *)a, b - a, MADV_DONTNEED);
 #else
-		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
-			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
+                __mmap((void *)a,
+                       b - a,
+                       PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                       -1,
+                       0);
 #endif
-		errno = e;
-	}
+                errno = e;
+        }
 
-	unlock_bin(i);
+        unlock_bin(i);
 }
 
 static void unmap_chunk(struct chunk *self)
 {
-	size_t extra = self->psize;
-	char *base = (char *)self - extra;
-	size_t len = CHUNK_SIZE(self) + extra;
-	/* Crash on double free */
-	if (extra & 1) a_crash();
-	int e = errno;
-	__munmap(base, len);
-	errno = e;
+        size_t extra = self->psize;
+        char *base = (char *)self - extra;
+        size_t len = CHUNK_SIZE(self) + extra;
+        /* Crash on double free */
+        if (extra & 1)
+                a_crash();
+        int e = errno;
+        __munmap(base, len);
+        errno = e;
 }
 
 void free(void *p)
 {
-	if (!p) return;
+        if (!p)
+                return;
 
-	struct chunk *self = MEM_TO_CHUNK(p);
+        struct chunk *self = MEM_TO_CHUNK(p);
 
-	if (IS_MMAPPED(self))
-		unmap_chunk(self);
-	else
-		__bin_chunk(self);
+        if (IS_MMAPPED(self))
+                unmap_chunk(self);
+        else
+                __bin_chunk(self);
 }
 
 void __malloc_donate(char *start, char *end)
 {
-	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
-	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;
+        size_t align_start_up = (SIZE_ALIGN - 1) & (-(uintptr_t)start - OVERHEAD);
+        size_t align_end_down = (SIZE_ALIGN - 1) & (uintptr_t)end;
 
-	/* Getting past this condition ensures that the padding for alignment
-	 * and header overhead will not overflow and will leave a nonzero
-	 * multiple of SIZE_ALIGN bytes between start and end. */
-	if (end - start <= OVERHEAD + align_start_up + align_end_down)
-		return;
-	start += align_start_up + OVERHEAD;
-	end -= align_end_down;
-
-	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
-	c->psize = n->csize = C_INUSE;
-	c->csize = n->psize = C_INUSE | (end-start);
-	__bin_chunk(c);
+        /* Getting past this condition ensures that the padding for alignment
+         * and header overhead will not overflow and will leave a nonzero
+         * multiple of SIZE_ALIGN bytes between start and end. */
+        if (end - start <= OVERHEAD + align_start_up + align_end_down)
+                return;
+        start += align_start_up + OVERHEAD;
+        end -= align_end_down;
+
+        struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
+        c->psize = n->csize = C_INUSE;
+        c->csize = n->psize = C_INUSE | (end - start);
+        __bin_chunk(c);
 }
 
 void __malloc_atfork(int who)
 {
-	if (who<0) {
-		lock(mal.split_merge_lock);
-		for (int i=0; i<64; i++)
-			lock(mal.bins[i].lock);
-	} else if (!who) {
-		for (int i=0; i<64; i++)
-			unlock(mal.bins[i].lock);
-		unlock(mal.split_merge_lock);
-	} else {
-		for (int i=0; i<64; i++)
-			mal.bins[i].lock[0] = mal.bins[i].lock[1] = 0;
-		mal.split_merge_lock[1] = 0;
-		mal.split_merge_lock[0] = 0;
-	}
+        if (who < 0) {
+                lock(mal.split_merge_lock);
+                for (int i = 0; i < 64; i++)
+                        lock(mal.bins[i].lock);
+        } else if (!who) {
+                for (int i = 0; i < 64; i++)
+                        unlock(mal.bins[i].lock);
+                unlock(mal.split_merge_lock);
+        } else {
+                for (int i = 0; i < 64; i++)
+                        mal.bins[i].lock[0] = mal.bins[i].lock[1] = 0;
+                mal.split_merge_lock[1] = 0;
+                mal.split_merge_lock[0] = 0;
+        }
 }
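
Note on the locking change: the patch routes malloc's internal lock()/unlock() through chcore_spin_lock()/chcore_spin_unlock() from <debug_lock.h>, which is not part of this diff. The sketch below only illustrates the interface the call sites above assume (a simple test-and-set spinlock over the first word of each two-int lock), using standard GCC/Clang atomic built-ins; the actual ChCore implementation may differ.

/* Illustrative sketch only -- not part of the patch. */
static inline void chcore_spin_lock(volatile int *lk)
{
        /* Spin until the lock word atomically changes from 0 to 1. */
        while (__atomic_exchange_n(lk, 1, __ATOMIC_ACQUIRE))
                ;
}

static inline void chcore_spin_unlock(volatile int *lk)
{
        /* Release the lock; pairs with the acquire above. */
        __atomic_store_n(lk, 0, __ATOMIC_RELEASE);
}

The second int of each lock (the futex waiter count used by the now-disabled musl path) is unused by a spinlock of this shape, but the reset branch of __malloc_atfork() still clears both words, so it remains correct either way.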
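
The brk path in __expand_heap() keeps musl's page-rounding idiom `n += -n & PAGE_SIZE-1`. A minimal worked check of that arithmetic (assuming a power-of-two page size such as 4096) for readers unfamiliar with the trick:

#include <assert.h>
#include <stddef.h>

/* Round n up to the next multiple of a power-of-two page size,
 * using the same -n & (page_size - 1) trick as __expand_heap(). */
static size_t round_up_to_page(size_t n, size_t page_size)
{
        return n + (-n & (page_size - 1));
}

int main(void)
{
        assert(round_up_to_page(5000, 4096) == 8192);
        assert(round_up_to_page(4096, 4096) == 4096);
        assert(round_up_to_page(1, 4096) == 4096);
        return 0;
}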