/*

   american fuzzy lop++ - dislocator, an abusive allocator
   -----------------------------------------------------

   Originally written by Michal Zalewski

   Copyright 2016 Google Inc. All rights reserved.
   Copyright 2019-2022 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This is a companion library that can be used as a drop-in replacement
   for the libc allocator in the fuzzed binaries. See README.dislocator.md for
   more info.

 */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>

#ifdef __APPLE__
  #include <mach/vm_statistics.h>
#endif

#ifdef __FreeBSD__
  #include <sys/param.h>
#endif

#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__HAIKU__)
  #include <unistd.h>
  #include <sys/prctl.h>
  #ifdef __linux__
    #include <sys/syscall.h>
    #include <malloc.h>
  #endif
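  /* Emulate BSD arc4random_buf(): prefer the getrandom(2) syscall when
     available, and otherwise fall back to srand()/rand(), which is weak but
     good enough for picking a canary value (this is not cryptographic). */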
  #ifdef __NR_getrandom
    #define arc4random_buf(p, l)                       \
      do {                                             \
                                                       \
        ssize_t rd = syscall(__NR_getrandom, p, l, 0); \
        if (rd != l) DEBUGF("getrandom failed");       \
                                                       \
      } while (0)

  #else
    #include <time.h>
    #define arc4random_buf(p, l)     \
      do {                           \
                                     \
        srand(time(NULL));           \
        u32 i;                       \
        u8 *ptr = (u8 *)p;           \
        for (i = 0; i < l; i++)      \
          ptr[i] = rand() % INT_MAX; \
                                     \
      } while (0)

  #endif
  #ifndef PR_SET_VMA
    #define PR_SET_VMA 0x53564d41
    #define PR_SET_VMA_ANON_NAME 0
  #endif
#endif

#include "config.h"
#include "types.h"

#if __STDC_VERSION__ < 201112L || \
    (defined(__FreeBSD__) && __FreeBSD_version < 1200000)
// use this hack if not C11
typedef struct {

  long long   __ll;
  long double __ld;

} max_align_t;

#endif

#define ALLOC_ALIGN_SIZE (_Alignof(max_align_t))

#ifndef PAGE_SIZE
  #define PAGE_SIZE 4096
#endif                                                        /* !PAGE_SIZE */

#ifndef MAP_ANONYMOUS
  #define MAP_ANONYMOUS MAP_ANON
#endif                                                    /* !MAP_ANONYMOUS */

#define SUPER_PAGE_SIZE (1 << 21)                     /* 2 MB "super page" */

/* Error / message handling: */

#define DEBUGF(_x...)                 \
  do {                                \
                                      \
    if (alloc_verbose) {              \
                                      \
      if (++call_depth == 1) {        \
                                      \
        fprintf(stderr, "[AFL] " _x); \
        fprintf(stderr, "\n");        \
                                      \
      }                               \
      call_depth--;                   \
                                      \
    }                                 \
                                      \
  } while (0)

#define FATAL(_x...)                    \
  do {                                  \
                                        \
    if (++call_depth == 1) {            \
                                        \
      fprintf(stderr, "*** [AFL] " _x); \
      fprintf(stderr, " ***\n");        \
      abort();                          \
                                        \
    }                                   \
    call_depth--;                       \
                                        \
  } while (0)

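/* Note: both macros bump call_depth around the fprintf() calls because
   fprintf() may itself allocate memory; without the guard, a diagnostic
   emitted from inside our own wrappers could recurse back into
   DEBUGF()/FATAL() indefinitely. */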
/* Macro to count the number of pages needed to store a buffer: */

#define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE)

/* Canary & clobber bytes: */

#define ALLOC_CANARY 0xAACCAACC
#define ALLOC_CLOBBER 0xCC

#define TAIL_ALLOC_CANARY 0xAC

#define PTR_C(_p) (((u32 *)(_p))[-1])
#define PTR_L(_p) (((u32 *)(_p))[-2])

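/* PTR_C() and PTR_L() read the 8 bytes of metadata stored immediately below
   a live allocation: a 32-bit canary in the 4 bytes just below the returned
   pointer, and the 32-bit requested length in the 4 bytes below that (see
   the "store allocation metadata" step in __dislocator_alloc()). */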
/* Configurable stuff (use AFL_LD_* to set): */

static size_t max_mem = MAX_ALLOC;      /* Max heap usage to permit         */
static u8     alloc_verbose,            /* Additional debug messages        */
    hard_fail,                          /* abort() when max_mem exceeded?   */
    no_calloc_over,                     /* abort() on calloc() overflows?   */
    align_allocations;                  /* Force alignment to sizeof(void*) */

#if defined __OpenBSD__ || defined __APPLE__
  #define __thread
  #warning no thread support available
#endif
static _Atomic size_t total_mem;        /* Currently allocated mem          */

static __thread u32 call_depth;         /* To avoid recursion via fprintf() */
static u32          alloc_canary;

/* This is the main alloc function. It allocates one page more than necessary,
   sets that tailing page to PROT_NONE, and then increments the return address
   so that it is right-aligned to that boundary. Since it always uses mmap(),
   the returned memory will be zeroed. */

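/* Resulting layout for a request of len bytes (rlen = len rounded up for
   alignment, if requested):

     |<--------- PG_COUNT(rlen + 8) pages --------->|<--- 1 page --->|
     | ...zero fill... | len (4) | canary (4) | buffer |  PROT_NONE  |
                                              ^
                                              returned pointer

   Any access past the end of the buffer runs straight into the PROT_NONE
   guard page and faults. */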
static void *__dislocator_alloc(size_t len) {

  u8 *   ret, *base;
  size_t tlen;
  int    flags, protflags, fd, sp;

  if (total_mem + len > max_mem || total_mem + len < total_mem) {

    if (hard_fail) FATAL("total allocs exceed %zu MB", max_mem / 1024 / 1024);

    DEBUGF("total allocs exceed %zu MB, returning NULL", max_mem / 1024 / 1024);

    return NULL;

  }

  size_t rlen;
  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1)))
    rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
  else
    rlen = len;

  /* We will also store buffer length and a canary below the actual buffer, so
     let's add 8 bytes for that. */

  base = NULL;
  tlen = (1 + PG_COUNT(rlen + 8)) * PAGE_SIZE;
  protflags = PROT_READ | PROT_WRITE;
  flags = MAP_PRIVATE | MAP_ANONYMOUS;
  fd = -1;
#if defined(PROT_MAX)
  // apply when sysctl vm.imply_prot_max is set to 1
  // no-op otherwise
  protflags |= PROT_MAX(PROT_READ | PROT_WRITE);
#endif
#if defined(USEHUGEPAGE)
  sp = (rlen >= SUPER_PAGE_SIZE && !(rlen % SUPER_PAGE_SIZE));

  #if defined(__APPLE__)
  if (sp) fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
  #elif defined(__linux__)
  if (sp) flags |= MAP_HUGETLB;
  #elif defined(__FreeBSD__)
  if (sp) flags |= MAP_ALIGNED_SUPER;
  #elif defined(__sun)
  if (sp) {

    base = (void *)(caddr_t)(1 << 21);
    flags |= MAP_ALIGN;

  }

  #endif
#else
  (void)sp;
#endif

  ret = (u8 *)mmap(base, tlen, protflags, flags, fd, 0);
#if defined(USEHUGEPAGE)
  /* We try one more time with a regular call */
  if (ret == MAP_FAILED) {

  #if defined(__APPLE__)
    fd = -1;
  #elif defined(__linux__)
    flags &= ~MAP_HUGETLB;
  #elif defined(__FreeBSD__)
    flags &= ~MAP_ALIGNED_SUPER;
  #elif defined(__sun)
    flags &= ~MAP_ALIGN;
  #endif
    ret = (u8 *)mmap(NULL, tlen, protflags, flags, fd, 0);

  }

#endif

  if (ret == MAP_FAILED) {

    if (hard_fail) FATAL("mmap() failed on alloc (OOM?)");

    DEBUGF("mmap() failed on alloc (OOM?)");

    return NULL;

  }

#if defined(USENAMEDPAGE)
  #if defined(__linux__)
  // in the /proc/<pid>/maps file, the anonymous page appears as
  // `<start>-<end> ---p 00000000 00:00 0 [anon:libdislocator]`
  if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)ret, tlen,
            (unsigned long)"libdislocator") < 0) {

    DEBUGF("prctl() failed");

  }

  #endif
#endif

  /* Set PROT_NONE on the last page. */

  if (mprotect(ret + PG_COUNT(rlen + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE))
    FATAL("mprotect() failed when allocating memory");

  /* Offset the return pointer so that it's right-aligned to the page
     boundary. */

  ret += PAGE_SIZE * PG_COUNT(rlen + 8) - rlen - 8;

  /* Store allocation metadata. */

  ret += 8;

  PTR_L(ret) = len;
  PTR_C(ret) = alloc_canary;

  total_mem += len;

  if (rlen != len) {

    size_t i;
    for (i = len; i < rlen; ++i)
      ret[i] = TAIL_ALLOC_CANARY;

  }

  return ret;

}

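/* Because the buffer is right-aligned against the guard page, an off-by-one
   access such as buf[len] (with alignment padding disabled) faults
   immediately instead of silently reading or corrupting heap memory. */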
/* The "user-facing" wrapper for calloc(). This just checks for overflows and
   displays debug messages if requested. */

void *calloc(size_t elem_len, size_t elem_cnt) {

  void *ret;

  size_t len = elem_len * elem_cnt;

  /* Perform some sanity checks to detect obvious issues... */

  if (elem_cnt && len / elem_cnt != elem_len) {

    if (no_calloc_over) {

      DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
             elem_cnt);
      return NULL;

    }

    FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);

  }

  ret = __dislocator_alloc(len);

  DEBUGF("calloc(%zu, %zu) = %p [%zu total]", elem_len, elem_cnt, ret,
         total_mem);

  return ret;

}

/* The wrapper for malloc(). Roughly the same, also clobbers the returned
   memory (unlike calloc(), malloc() is not guaranteed to return zeroed
   memory). */

void *malloc(size_t len) {

  void *ret;

  ret = __dislocator_alloc(len);

  DEBUGF("malloc(%zu) = %p [%zu total]", len, ret, total_mem);

  if (ret && len) memset(ret, ALLOC_CLOBBER, len);

  return ret;

}

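/* The 0xCC clobber pattern makes use of uninitialized malloc() memory easy
   to spot: stray pointers become 0xCCCC... (likely to fault when
   dereferenced), and 0xCC happens to be the x86 INT3 opcode if it is ever
   executed. */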
/* The wrapper for free(). This simply marks the entire region as PROT_NONE.
   If the region is already freed, the code will segfault during the attempt to
   read the canary. Not very graceful, but works, right? */

void free(void *ptr) {

  u32 len;

  DEBUGF("free(%p)", ptr);

  if (!ptr) return;

  if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on free()");

  len = PTR_L(ptr);

  total_mem -= len;
  u8 *ptr_ = ptr;

  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) {

    size_t rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
    for (; len < rlen; ++len)
      if (ptr_[len] != TAIL_ALLOC_CANARY)
        FATAL("bad tail allocator canary on free()");

  }

  /* Protect everything. Note that the extra page at the end is already
     set as PROT_NONE, so we don't need to touch that. */

  ptr_ -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8;

  if (mprotect(ptr_ - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE))
    FATAL("mprotect() failed when freeing memory");

  ptr = ptr_;

  /* Keep the mapping; this is wasteful, but prevents ptr reuse. */

}


/* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
   move data, and then free (aka mprotect()) the original one. */

void *realloc(void *ptr, size_t len) {

  void *ret;

  ret = malloc(len);

  if (ret && ptr) {

    if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on realloc()");
    // Here the tail canary check is delayed to free()

    memcpy(ret, ptr, MIN(len, PTR_L(ptr)));
    free(ptr);

  }

  DEBUGF("realloc(%p, %zu) = %p [%zu total]", ptr, len, ret, total_mem);

  return ret;

}

/* For posix_memalign() we mainly validate the alignment argument; the
   length, rounded up to a multiple of the alignment if needed, is then
   served as a normal request. */

int posix_memalign(void **ptr, size_t align, size_t len) {

  // if (*ptr == NULL) return EINVAL; // (andrea) Why? I comment it out for now
  if ((align % 2) || (align % sizeof(void *))) return EINVAL;
  if (len == 0) {

    *ptr = NULL;
    return 0;

  }

  size_t rem = len % align;
  if (rem) len += align - rem;

  *ptr = __dislocator_alloc(len);

  if (*ptr && len) memset(*ptr, ALLOC_CLOBBER, len);

  DEBUGF("posix_memalign(%p %zu, %zu) [*ptr = %p]", ptr, align, len, *ptr);

  return 0;

}

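/* Why rounding the length up is enough: allocations come back right-aligned
   to a page boundary, so as long as align is a power of two no larger than
   PAGE_SIZE and len is a multiple of align, page_end - len is align-aligned
   as well. */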
/* Just the non-POSIX fashion. */

void *memalign(size_t align, size_t len) {

  void *ret = NULL;

  if (posix_memalign(&ret, align, len)) {

    DEBUGF("memalign(%zu, %zu) failed", align, len);

  }

  return ret;

}

/* Sort of a C11 alias of memalign(), only more severe, alignment-wise. */

void *aligned_alloc(size_t align, size_t len) {

  void *ret = NULL;

  if ((len % align)) return NULL;

  if (posix_memalign(&ret, align, len)) {

    DEBUGF("aligned_alloc(%zu, %zu) failed", align, len);

  }

  return ret;

}

/* BSD-specific API; mainly checks the size computation for possible
   overflow. */

void *reallocarray(void *ptr, size_t elem_len, size_t elem_cnt) {

  const size_t elem_lim = 1UL << (sizeof(size_t) * 4);
  const size_t elem_tot = elem_len * elem_cnt;
  void *       ret = NULL;

  if ((elem_len >= elem_lim || elem_cnt >= elem_lim) && elem_len > 0 &&
      elem_cnt > (SIZE_MAX / elem_len)) {

    DEBUGF("reallocarray size overflow (%zu)", elem_tot);

  } else {

    ret = realloc(ptr, elem_tot);

  }

  return ret;

}

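/* Note on the overflow test above: elem_lim is 2^(half the bit width of
   size_t), and a product can only overflow size_t if at least one factor is
   >= that limit, so the more expensive division check is skipped for small
   operands. This mirrors OpenBSD's reallocarray() check. */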
#if !defined(__ANDROID__)
size_t malloc_usable_size(void *ptr) {

#else
size_t malloc_usable_size(const void *ptr) {

#endif

  return ptr ? PTR_L(ptr) : 0;

}


__attribute__((constructor)) void __dislocator_init(void) {

  char *tmp = getenv("AFL_LD_LIMIT_MB");

  if (tmp) {

    char *             tok;
    unsigned long long mmem = strtoull(tmp, &tok, 10);
    if (*tok != '\0' || errno == ERANGE || mmem > SIZE_MAX / 1024 / 1024)
      FATAL("Bad value for AFL_LD_LIMIT_MB");
    max_mem = mmem * 1024 * 1024;

  }

  alloc_canary = ALLOC_CANARY;
  tmp = getenv("AFL_RANDOM_ALLOC_CANARY");

  if (tmp) arc4random_buf(&alloc_canary, sizeof(alloc_canary));

  alloc_verbose = !!getenv("AFL_LD_VERBOSE");
  hard_fail = !!getenv("AFL_LD_HARD_FAIL");
  no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
  align_allocations = !!getenv("AFL_ALIGNED_ALLOC");

}

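/* Typical use is to preload the library into the target, e.g.
   AFL_LD_LIMIT_MB=512 LD_PRELOAD=/path/to/libdislocator.so ./target
   (or AFL_PRELOAD=... when running under afl-fuzz); see
   README.dislocator.md for details. */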
/* NetBSD-specific fault handler API subset: */

void (*esetfunc(void (*fn)(int, const char *, ...)))(int, const char *, ...) {

  /* Might not be meaningful to implement; upper calls already report errors */
  return NULL;

}

void *emalloc(size_t len) {

  return malloc(len);

}

void *ecalloc(size_t elem_len, size_t elem_cnt) {

  return calloc(elem_len, elem_cnt);

}

void *erealloc(void *ptr, size_t len) {

  return realloc(ptr, len);

}
