/*
   american fuzzy lop++ - instrumentation bootstrap
   ------------------------------------------------

   Copyright 2015, 2016 Google Inc. All rights reserved.
   Copyright 2019-2022 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     https://www.apache.org/licenses/LICENSE-2.0


*/

#ifdef __ANDROID__
  #include "android-ashmem.h"
#endif
#include "config.h"
#include "types.h"
#include "cmplog.h"
#include "llvm-alternative-coverage.h"

#define XXH_INLINE_ALL
#include "xxhash.h"
#undef XXH_INLINE_ALL

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <errno.h>

#include <sys/mman.h>
#ifndef __HAIKU__
  #include <sys/syscall.h>
#endif
#ifndef USEMMAP
  #include <sys/shm.h>
#endif
#include <sys/wait.h>
#include <sys/types.h>

#if !__GNUC__
  #include "llvm/Config/llvm-config.h"
#endif

#ifdef __linux__
  #include "snapshot-inl.h"
#endif

/* This is a somewhat ugly hack for the experimental 'trace-pc-guard' mode.
   Basically, we need to make sure that the forkserver is initialized after
   the LLVM-generated runtime initialization pass, not before. */

#ifndef MAP_FIXED_NOREPLACE
  #ifdef MAP_EXCL
    #define MAP_FIXED_NOREPLACE MAP_EXCL | MAP_FIXED
  #else
    #define MAP_FIXED_NOREPLACE MAP_FIXED
  #endif
#endif
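
/* Note on the fallback above: on systems without MAP_FIXED_NOREPLACE we fall
   back to MAP_EXCL | MAP_FIXED where available (e.g. FreeBSD), or to plain
   MAP_FIXED, which silently replaces an existing mapping instead of failing.
   So a colliding __afl_map_addr is detected on Linux but may go unnoticed on
   such fallback platforms. */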

#define CTOR_PRIO 3
#define EARLY_FS_PRIO 5

#include <sys/mman.h>
#include <fcntl.h>

/* Globals needed by the injected instrumentation. The __afl_area_initial region
   is used for instrumentation output before __afl_map_shm() has a chance to
   run. It will end up as .comm, so it shouldn't be too wasteful. */

#if MAP_SIZE <= 65536
  #define MAP_INITIAL_SIZE 2097152
#else
  #define MAP_INITIAL_SIZE MAP_SIZE
#endif

#if defined(__HAIKU__)
extern ssize_t _kern_write(int fd, off_t pos, const void *buffer,
                           size_t bufferSize);
#endif  // HAIKU

static u8  __afl_area_initial[MAP_INITIAL_SIZE];
static u8 *__afl_area_ptr_dummy = __afl_area_initial;
static u8 *__afl_area_ptr_backup = __afl_area_initial;

u8 *       __afl_area_ptr = __afl_area_initial;
u8 *       __afl_dictionary;
u8 *       __afl_fuzz_ptr;
static u32 __afl_fuzz_len_dummy;
u32 *      __afl_fuzz_len = &__afl_fuzz_len_dummy;

u32 __afl_final_loc;
u32 __afl_map_size = MAP_SIZE;
u32 __afl_dictionary_len;
u64 __afl_map_addr;

// for the __AFL_COVERAGE_ON/__AFL_COVERAGE_OFF features to work:
int        __afl_selective_coverage __attribute__((weak));
int        __afl_selective_coverage_start_off __attribute__((weak));
static int __afl_selective_coverage_temp = 1;

#if defined(__ANDROID__) || defined(__HAIKU__)
PREV_LOC_T __afl_prev_loc[NGRAM_SIZE_MAX];
PREV_LOC_T __afl_prev_caller[CTX_MAX_K];
u32        __afl_prev_ctx;
#else
__thread PREV_LOC_T __afl_prev_loc[NGRAM_SIZE_MAX];
__thread PREV_LOC_T __afl_prev_caller[CTX_MAX_K];
__thread u32        __afl_prev_ctx;
#endif

int __afl_sharedmem_fuzzing __attribute__((weak));

struct cmp_map *__afl_cmp_map;
struct cmp_map *__afl_cmp_map_backup;

/* Child pid? */

static s32 child_pid;
static void (*old_sigterm_handler)(int) = 0;

/* Running in persistent mode? */

static u8 is_persistent;

/* Are we in sancov mode? */

static u8 _is_sancov;

/* Debug? */

static u32 __afl_debug;

/* Already initialized markers */

u32 __afl_already_initialized_shm;
u32 __afl_already_initialized_forkserver;
u32 __afl_already_initialized_first;
u32 __afl_already_initialized_second;
u32 __afl_already_initialized_init;

/* Dummy pipe for area_is_valid() */

static int __afl_dummy_fd[2] = {2, 2};

/* ensure we kill the child on termination */

static void at_exit(int signal) {

  if (unlikely(child_pid > 0)) {

    kill(child_pid, SIGKILL);
    child_pid = -1;

  }

  _exit(0);

}

#define default_hash(a, b) XXH3_64bits(a, b)

/* Uninspired gcc plugin instrumentation */

void __afl_trace(const u32 x) {

  PREV_LOC_T prev = __afl_prev_loc[0];
  __afl_prev_loc[0] = (x >> 1);

  u8 *p = &__afl_area_ptr[prev ^ x];

#if 1                                      /* enable for neverZero feature. */
  #if __GNUC__
  u8 c = __builtin_add_overflow(*p, 1, p);
  *p += c;
  #else
  *p += 1 + ((u8)(1 + *p) == 0);
  #endif
#else
  ++*p;
#endif

  return;

}
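
/* neverZero, worked through: a plain ++*p wraps 255 -> 0, making a hot edge
   look like it was never hit. With the overflow-carry variant above, *p == 255
   yields *p = 0 plus a carry c = 1, so *p ends up as 1 - the counter
   saturates through zero and the edge stays visible in the bitmap. */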

/* Error reporting to forkserver controller */

static void send_forkserver_error(int error) {

  u32 status;
  if (!error || error > 0xffff) return;
  status = (FS_OPT_ERROR | FS_OPT_SET_ERROR(error));
  if (write(FORKSRV_FD + 1, (char *)&status, 4) != 4) { return; }

}
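
/* The error code is packed into the same 32-bit option word that afl-fuzz
   parses during the forkserver handshake: FS_OPT_ERROR flags the word as an
   error report and FS_OPT_SET_ERROR() embeds the FS_ERROR_* value in it. */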

/* SHM fuzzing setup. */

static void __afl_map_shm_fuzz() {

  char *id_str = getenv(SHM_FUZZ_ENV_VAR);

  if (__afl_debug) {

    fprintf(stderr, "DEBUG: fuzzcase shmem %s\n", id_str ? id_str : "none");

  }

  if (id_str) {

    u8 *map = NULL;

#ifdef USEMMAP
    const char *shm_file_path = id_str;
    int         shm_fd = -1;

    /* create the shared memory segment as if it was a file */
    shm_fd = shm_open(shm_file_path, O_RDWR, DEFAULT_PERMISSION);
    if (shm_fd == -1) {

      fprintf(stderr, "shm_open() failed for fuzz\n");
      send_forkserver_error(FS_ERROR_SHM_OPEN);
      exit(1);

    }

    map =
        (u8 *)mmap(0, MAX_FILE + sizeof(u32), PROT_READ, MAP_SHARED, shm_fd, 0);

#else
    u32 shm_id = atoi(id_str);
    map = (u8 *)shmat(shm_id, NULL, 0);

#endif

    /* Whooooops. */

    if (!map || map == (void *)-1) {

      perror("Could not access fuzzing shared memory");
      send_forkserver_error(FS_ERROR_SHM_OPEN);
      exit(1);

    }

    __afl_fuzz_len = (u32 *)map;
    __afl_fuzz_ptr = map + sizeof(u32);

    if (__afl_debug) {

      fprintf(stderr, "DEBUG: successfully got fuzzing shared memory\n");

    }

  } else {

    fprintf(stderr, "Error: variable for fuzzing shared memory is not set\n");
    send_forkserver_error(FS_ERROR_SHM_OPEN);
    exit(1);

  }

}
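
/* Layout of the shared-memory test case region set up above, as implied by
   the mmap size and the pointer math:

     [ u32 length ][ up to MAX_FILE bytes of test case data ]
       ^              ^
       __afl_fuzz_len __afl_fuzz_ptr

   afl-fuzz rewrites length and data in place before every execution, so a
   persistent-mode target can pick up the next input without any read(). */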

/* SHM setup. */

static void __afl_map_shm(void) {

  if (__afl_already_initialized_shm) return;
  __afl_already_initialized_shm = 1;

  // if we are not running in afl ensure the map exists
  if (!__afl_area_ptr) { __afl_area_ptr = __afl_area_ptr_dummy; }

  char *id_str = getenv(SHM_ENV_VAR);

  if (__afl_final_loc) {

    __afl_map_size = ++__afl_final_loc;  // as the edge IDs are counted from 0

    if (__afl_final_loc > MAP_SIZE) {

      char *ptr;
      u32   val = 0;
      if ((ptr = getenv("AFL_MAP_SIZE")) != NULL) val = atoi(ptr);
      if (val < __afl_final_loc) {

        if (__afl_final_loc > FS_OPT_MAX_MAPSIZE) {

          if (!getenv("AFL_QUIET"))
            fprintf(stderr,
                    "Error: AFL++ tools *require* AFL_MAP_SIZE to be set to "
                    "%u to be able to run this instrumented program!\n",
                    __afl_final_loc);

          if (id_str) {

            send_forkserver_error(FS_ERROR_MAP_SIZE);
            exit(-1);

          }

        } else {

          if (!getenv("AFL_QUIET"))
            fprintf(stderr,
                    "Warning: AFL++ tools might need AFL_MAP_SIZE set to %u "
                    "to be able to run this instrumented program if it "
                    "crashes!\n",
                    __afl_final_loc);

        }

      }

    }

  }

  /* If we're running under AFL, attach to the appropriate region, replacing the
     early-stage __afl_area_initial region that is needed to allow some really
     hacky .init code to work correctly in projects such as OpenSSL. */

  if (__afl_debug) {

    fprintf(
        stderr,
        "DEBUG: (1) id_str %s, __afl_area_ptr %p, __afl_area_initial %p, "
        "__afl_area_ptr_dummy %p, __afl_map_addr 0x%llx, MAP_SIZE %u, "
        "__afl_final_loc %u, __afl_map_size %u, max_size_forkserver %u/0x%x\n",
        id_str == NULL ? "<null>" : id_str, __afl_area_ptr, __afl_area_initial,
        __afl_area_ptr_dummy, __afl_map_addr, MAP_SIZE, __afl_final_loc,
        __afl_map_size, FS_OPT_MAX_MAPSIZE, FS_OPT_MAX_MAPSIZE);

  }

  if (id_str) {

    if (__afl_area_ptr && __afl_area_ptr != __afl_area_initial &&
        __afl_area_ptr != __afl_area_ptr_dummy) {

      if (__afl_map_addr) {

        munmap((void *)__afl_map_addr, __afl_final_loc);

      } else {

        free(__afl_area_ptr);

      }

      __afl_area_ptr = __afl_area_ptr_dummy;

    }

#ifdef USEMMAP
    const char *   shm_file_path = id_str;
    int            shm_fd = -1;
    unsigned char *shm_base = NULL;

    /* create the shared memory segment as if it was a file */
    shm_fd = shm_open(shm_file_path, O_RDWR, DEFAULT_PERMISSION);
    if (shm_fd == -1) {

      fprintf(stderr, "shm_open() failed\n");
      send_forkserver_error(FS_ERROR_SHM_OPEN);
      exit(1);

    }

    /* map the shared memory segment to the address space of the process */
    if (__afl_map_addr) {

      shm_base =
          mmap((void *)__afl_map_addr, __afl_map_size, PROT_READ | PROT_WRITE,
               MAP_FIXED_NOREPLACE | MAP_SHARED, shm_fd, 0);

    } else {

      shm_base = mmap(0, __afl_map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                      shm_fd, 0);

    }

    close(shm_fd);
    shm_fd = -1;

    if (shm_base == MAP_FAILED) {

      fprintf(stderr, "mmap() failed\n");
      perror("mmap for map");

      if (__afl_map_addr)
        send_forkserver_error(FS_ERROR_MAP_ADDR);
      else
        send_forkserver_error(FS_ERROR_MMAP);

      exit(2);

    }

    __afl_area_ptr = shm_base;
#else
    u32 shm_id = atoi(id_str);

    if (__afl_map_size && __afl_map_size > MAP_SIZE) {

      u8 *map_env = (u8 *)getenv("AFL_MAP_SIZE");
      if (!map_env || atoi((char *)map_env) < MAP_SIZE) {

        send_forkserver_error(FS_ERROR_MAP_SIZE);
        _exit(1);

      }

    }

    __afl_area_ptr = (u8 *)shmat(shm_id, (void *)__afl_map_addr, 0);

    /* Whooooops. */

    if (!__afl_area_ptr || __afl_area_ptr == (void *)-1) {

      if (__afl_map_addr)
        send_forkserver_error(FS_ERROR_MAP_ADDR);
      else
        send_forkserver_error(FS_ERROR_SHMAT);

      perror("shmat for map");
      _exit(1);

    }

#endif

    /* Write something into the bitmap so that even with low AFL_INST_RATIO,
       our parent doesn't give up on us. */

    __afl_area_ptr[0] = 1;

  } else if ((!__afl_area_ptr || __afl_area_ptr == __afl_area_initial) &&

             __afl_map_addr) {

    __afl_area_ptr = (u8 *)mmap(
        (void *)__afl_map_addr, __afl_map_size, PROT_READ | PROT_WRITE,
        MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);

    if (__afl_area_ptr == MAP_FAILED) {

      fprintf(stderr, "can not acquire mmap for address %p\n",
              (void *)__afl_map_addr);
      send_forkserver_error(FS_ERROR_SHM_OPEN);
      exit(1);

    }

  } else if (_is_sancov && __afl_area_ptr != __afl_area_initial) {

    free(__afl_area_ptr);
    __afl_area_ptr = NULL;

    if (__afl_final_loc > MAP_INITIAL_SIZE) {

      __afl_area_ptr = (u8 *)malloc(__afl_final_loc);

    }

    if (!__afl_area_ptr) { __afl_area_ptr = __afl_area_ptr_dummy; }

  }

  __afl_area_ptr_backup = __afl_area_ptr;

  if (__afl_debug) {

    fprintf(stderr,
            "DEBUG: (2) id_str %s, __afl_area_ptr %p, __afl_area_initial %p, "
            "__afl_area_ptr_dummy %p, __afl_map_addr 0x%llx, MAP_SIZE "
            "%u, __afl_final_loc %u, __afl_map_size %u, "
            "max_size_forkserver %u/0x%x\n",
            id_str == NULL ? "<null>" : id_str, __afl_area_ptr,
            __afl_area_initial, __afl_area_ptr_dummy, __afl_map_addr, MAP_SIZE,
            __afl_final_loc, __afl_map_size, FS_OPT_MAX_MAPSIZE,
            FS_OPT_MAX_MAPSIZE);

  }

  if (__afl_selective_coverage) {

    if (__afl_map_size > MAP_INITIAL_SIZE) {

      __afl_area_ptr_dummy = (u8 *)malloc(__afl_map_size);

      if (__afl_area_ptr_dummy) {

        if (__afl_selective_coverage_start_off) {

          __afl_area_ptr = __afl_area_ptr_dummy;

        }

      } else {

        fprintf(stderr, "Error: __afl_selective_coverage failed!\n");
        __afl_selective_coverage = 0;
        // continue;

      }

    }

  }

  id_str = getenv(CMPLOG_SHM_ENV_VAR);

  if (__afl_debug) {

    fprintf(stderr, "DEBUG: cmplog id_str %s\n",
            id_str == NULL ? "<null>" : id_str);

  }

  if (id_str) {

    if ((__afl_dummy_fd[1] = open("/dev/null", O_WRONLY)) < 0) {

      if (pipe(__afl_dummy_fd) < 0) { __afl_dummy_fd[1] = 1; }

    }

#ifdef USEMMAP
    const char *    shm_file_path = id_str;
    int             shm_fd = -1;
    struct cmp_map *shm_base = NULL;

    /* create the shared memory segment as if it was a file */
    shm_fd = shm_open(shm_file_path, O_RDWR, DEFAULT_PERMISSION);
    if (shm_fd == -1) {

      perror("shm_open() failed");
      send_forkserver_error(FS_ERROR_SHM_OPEN);
      exit(1);

    }

    /* map the shared memory segment to the address space of the process */
    shm_base = mmap(0, sizeof(struct cmp_map), PROT_READ | PROT_WRITE,
                    MAP_SHARED, shm_fd, 0);
    if (shm_base == MAP_FAILED) {

      close(shm_fd);
      shm_fd = -1;

      fprintf(stderr, "mmap() failed\n");
      send_forkserver_error(FS_ERROR_SHM_OPEN);
      exit(2);

    }

    __afl_cmp_map = shm_base;
#else
    u32 shm_id = atoi(id_str);

    __afl_cmp_map = (struct cmp_map *)shmat(shm_id, NULL, 0);
#endif

    __afl_cmp_map_backup = __afl_cmp_map;

    if (!__afl_cmp_map || __afl_cmp_map == (void *)-1) {

      perror("shmat for cmplog");
      send_forkserver_error(FS_ERROR_SHM_OPEN);
      _exit(1);

    }

  }

}

/* unmap SHM. */

static void __afl_unmap_shm(void) {

  if (!__afl_already_initialized_shm) return;

  char *id_str = getenv(SHM_ENV_VAR);

  if (id_str) {

#ifdef USEMMAP

    munmap((void *)__afl_area_ptr, __afl_map_size);

#else

    shmdt((void *)__afl_area_ptr);

#endif

  } else if ((!__afl_area_ptr || __afl_area_ptr == __afl_area_initial) &&

             __afl_map_addr) {

    munmap((void *)__afl_map_addr, __afl_map_size);

  }

  __afl_area_ptr = __afl_area_ptr_dummy;

  id_str = getenv(CMPLOG_SHM_ENV_VAR);

  if (id_str) {

#ifdef USEMMAP

    munmap((void *)__afl_cmp_map, __afl_map_size);

#else

    shmdt((void *)__afl_cmp_map);

#endif

    __afl_cmp_map = NULL;
    __afl_cmp_map_backup = NULL;

  }

  __afl_already_initialized_shm = 0;

}

#define write_error(text) write_error_with_location(text, __FILE__, __LINE__)

void write_error_with_location(char *text, char *filename, int linenumber) {

  u8 *  o = getenv("__AFL_OUT_DIR");
  char *e = strerror(errno);

  if (o) {

    char buf[4096];
    snprintf(buf, sizeof(buf), "%s/error.txt", o);
    FILE *f = fopen(buf, "a");

    if (f) {

      fprintf(f, "File %s, line %d: Error(%s): %s\n", filename, linenumber,
              text, e);
      fclose(f);

    }

  }

  fprintf(stderr, "File %s, line %d: Error(%s): %s\n", filename, linenumber,
          text, e);

}

#ifdef __linux__
static void __afl_start_snapshots(void) {

  static u8 tmp[4] = {0, 0, 0, 0};
  u32       status = 0;
  u32       already_read_first = 0;
  u32       was_killed;

  u8 child_stopped = 0;

  void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL);

  /* Phone home and tell the parent that we're OK. If parent isn't there,
     assume we're not running in forkserver mode and just execute program. */

  status |= (FS_OPT_ENABLED | FS_OPT_SNAPSHOT | FS_OPT_NEWCMPLOG);
  if (__afl_sharedmem_fuzzing != 0) status |= FS_OPT_SHDMEM_FUZZ;
  if (__afl_map_size <= FS_OPT_MAX_MAPSIZE)
    status |= (FS_OPT_SET_MAPSIZE(__afl_map_size) | FS_OPT_MAPSIZE);
  if (__afl_dictionary_len && __afl_dictionary) status |= FS_OPT_AUTODICT;
  memcpy(tmp, &status, 4);

  if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; }

  if (__afl_sharedmem_fuzzing || (__afl_dictionary_len && __afl_dictionary)) {

    if (read(FORKSRV_FD, &was_killed, 4) != 4) {

      write_error("read from afl-fuzz");
      _exit(1);

    }

    if (__afl_debug) {

      fprintf(stderr, "target forkserver recv: %08x\n", was_killed);

    }

    if ((was_killed & (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) ==
        (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) {

      __afl_map_shm_fuzz();

    }

    if ((was_killed & (FS_OPT_ENABLED | FS_OPT_AUTODICT)) ==
            (FS_OPT_ENABLED | FS_OPT_AUTODICT) &&
        __afl_dictionary_len && __afl_dictionary) {

      // great, let's pass the dictionary through the forkserver FD
      u32 len = __afl_dictionary_len, offset = 0;
      s32 ret;

      if (write(FORKSRV_FD + 1, &len, 4) != 4) {

        write(2, "Error: could not send dictionary len\n",
              strlen("Error: could not send dictionary len\n"));
        _exit(1);

      }

      while (len != 0) {

        ret = write(FORKSRV_FD + 1, __afl_dictionary + offset, len);

        if (ret < 1) {

          write(2, "Error: could not send dictionary\n",
                strlen("Error: could not send dictionary\n"));
          _exit(1);

        }

        len -= ret;
        offset += ret;

      }

    } else {

      // this forkserver does not understand extended option passing
      // or does not want the dictionary
      if (!__afl_fuzz_ptr) already_read_first = 1;

    }

  }

  while (1) {

    int status;

    if (already_read_first) {

      already_read_first = 0;

    } else {

      /* Wait for parent by reading from the pipe. Abort if read fails. */
      if (read(FORKSRV_FD, &was_killed, 4) != 4) {

        write_error("reading from afl-fuzz");
        _exit(1);

      }

    }

  #ifdef _AFL_DOCUMENT_MUTATIONS
    if (__afl_fuzz_ptr) {

      static uint32_t counter = 0;
      char            fn[32];
      sprintf(fn, "%09u:forkserver", counter);
      s32 fd_doc = open(fn, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
      if (fd_doc >= 0) {

        if (write(fd_doc, __afl_fuzz_ptr, *__afl_fuzz_len) != *__afl_fuzz_len) {

          fprintf(stderr, "write of mutation file failed: %s\n", fn);
          unlink(fn);

        }

        close(fd_doc);

      }

      counter++;

    }

  #endif

    /* If we stopped the child in persistent mode, but there was a race
       condition and afl-fuzz already issued SIGKILL, write off the old
       process. */

    if (child_stopped && was_killed) {

      child_stopped = 0;
      if (waitpid(child_pid, &status, 0) < 0) {

        write_error("child_stopped && was_killed");
        _exit(1);  // TODO why exit?

      }

    }

    if (!child_stopped) {

      /* Once woken up, create a clone of our process. */

      child_pid = fork();
      if (child_pid < 0) {

        write_error("fork");
        _exit(1);

      }

      /* In child process: close fds, resume execution. */

      if (!child_pid) {

        //(void)nice(-20);  // does not seem to improve

        signal(SIGCHLD, old_sigchld_handler);
        signal(SIGTERM, old_sigterm_handler);

        close(FORKSRV_FD);
        close(FORKSRV_FD + 1);

        if (!afl_snapshot_take(AFL_SNAPSHOT_MMAP | AFL_SNAPSHOT_FDS |
                               AFL_SNAPSHOT_REGS | AFL_SNAPSHOT_EXIT)) {

          raise(SIGSTOP);

        }

        __afl_area_ptr[0] = 1;
        memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));

        return;

      }

    } else {

      /* Special handling for persistent mode: if the child is alive but
         currently stopped, simply restart it with SIGCONT. */

      kill(child_pid, SIGCONT);
      child_stopped = 0;

    }

    /* In parent process: write PID to pipe, then wait for child. */

    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) {

      write_error("write to afl-fuzz");
      _exit(1);

    }

    if (waitpid(child_pid, &status, WUNTRACED) < 0) {

      write_error("waitpid");
      _exit(1);

    }

    /* In persistent mode, the child stops itself with SIGSTOP to indicate
       a successful run. In this case, we want to wake it up without forking
       again. */

    if (WIFSTOPPED(status)) child_stopped = 1;

    /* Relay wait status to pipe, then loop back. */

    if (write(FORKSRV_FD + 1, &status, 4) != 4) {

      write_error("writing to afl-fuzz");
      _exit(1);

    }

  }

}

#endif

/* Fork server logic. */

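/* Forkserver handshake, as implemented below: we write a 4-byte option word
   ("hello") to FORKSRV_FD + 1, optionally exchange the shared-memory fuzzing
   flag and the autodictionary, then loop: read 4 bytes from FORKSRV_FD (the
   signal to run), fork(), report the child PID, waitpid(), and report the
   child's wait status back - 4 bytes each way per execution. */
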
static void __afl_start_forkserver(void) {

  if (__afl_already_initialized_forkserver) return;
  __afl_already_initialized_forkserver = 1;

  struct sigaction orig_action;
  sigaction(SIGTERM, NULL, &orig_action);
  old_sigterm_handler = orig_action.sa_handler;
  signal(SIGTERM, at_exit);

#ifdef __linux__
  if (/*!is_persistent &&*/ !__afl_cmp_map && !getenv("AFL_NO_SNAPSHOT") &&
      afl_snapshot_init() >= 0) {

    __afl_start_snapshots();
    return;

  }

#endif

  u8  tmp[4] = {0, 0, 0, 0};
  u32 status_for_fsrv = 0;
  u32 already_read_first = 0;
  u32 was_killed;

  u8 child_stopped = 0;

  void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL);

  if (__afl_map_size <= FS_OPT_MAX_MAPSIZE) {

    status_for_fsrv |= (FS_OPT_SET_MAPSIZE(__afl_map_size) | FS_OPT_MAPSIZE);

  }

  if (__afl_dictionary_len && __afl_dictionary) {

    status_for_fsrv |= FS_OPT_AUTODICT;

  }

  if (__afl_sharedmem_fuzzing != 0) { status_for_fsrv |= FS_OPT_SHDMEM_FUZZ; }
  if (status_for_fsrv) {

    status_for_fsrv |= (FS_OPT_ENABLED | FS_OPT_NEWCMPLOG);

  }

  memcpy(tmp, &status_for_fsrv, 4);

  /* Phone home and tell the parent that we're OK. If parent isn't there,
     assume we're not running in forkserver mode and just execute program. */

  if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; }

  if (__afl_sharedmem_fuzzing || (__afl_dictionary_len && __afl_dictionary)) {

    if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);

    if (__afl_debug) {

      fprintf(stderr, "target forkserver recv: %08x\n", was_killed);

    }

    if ((was_killed & (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) ==
        (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) {

      __afl_map_shm_fuzz();

    }

    if ((was_killed & (FS_OPT_ENABLED | FS_OPT_AUTODICT)) ==
            (FS_OPT_ENABLED | FS_OPT_AUTODICT) &&
        __afl_dictionary_len && __afl_dictionary) {

      // great, let's pass the dictionary through the forkserver FD
      u32 len = __afl_dictionary_len, offset = 0;

      if (write(FORKSRV_FD + 1, &len, 4) != 4) {

        write(2, "Error: could not send dictionary len\n",
              strlen("Error: could not send dictionary len\n"));
        _exit(1);

      }

      while (len != 0) {

        s32 ret;
        ret = write(FORKSRV_FD + 1, __afl_dictionary + offset, len);

        if (ret < 1) {

          write(2, "Error: could not send dictionary\n",
                strlen("Error: could not send dictionary\n"));
          _exit(1);

        }

        len -= ret;
        offset += ret;

      }

    } else {

      // this forkserver does not understand extended option passing
      // or does not want the dictionary
      if (!__afl_fuzz_ptr) already_read_first = 1;

    }

  }

  while (1) {

    int status;

    /* Wait for parent by reading from the pipe. Abort if read fails. */

    if (already_read_first) {

      already_read_first = 0;

    } else {

      if (read(FORKSRV_FD, &was_killed, 4) != 4) {

        // write_error("read from afl-fuzz");
        _exit(1);

      }

    }

#ifdef _AFL_DOCUMENT_MUTATIONS
    if (__afl_fuzz_ptr) {

      static uint32_t counter = 0;
      char            fn[32];
      sprintf(fn, "%09u:forkserver", counter);
      s32 fd_doc = open(fn, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
      if (fd_doc >= 0) {

        if (write(fd_doc, __afl_fuzz_ptr, *__afl_fuzz_len) != *__afl_fuzz_len) {

          fprintf(stderr, "write of mutation file failed: %s\n", fn);
          unlink(fn);

        }

        close(fd_doc);

      }

      counter++;

    }

#endif

    /* If we stopped the child in persistent mode, but there was a race
       condition and afl-fuzz already issued SIGKILL, write off the old
       process. */

    if (child_stopped && was_killed) {

      child_stopped = 0;
      if (waitpid(child_pid, &status, 0) < 0) {

        write_error("child_stopped && was_killed");
        _exit(1);

      }

    }

    if (!child_stopped) {

      /* Once woken up, create a clone of our process. */

      child_pid = fork();
      if (child_pid < 0) {

        write_error("fork");
        _exit(1);

      }

      /* In child process: close fds, resume execution. */

      if (!child_pid) {

        //(void)nice(-20);

        signal(SIGCHLD, old_sigchld_handler);
        signal(SIGTERM, old_sigterm_handler);

        close(FORKSRV_FD);
        close(FORKSRV_FD + 1);
        return;

      }

    } else {

      /* Special handling for persistent mode: if the child is alive but
         currently stopped, simply restart it with SIGCONT. */

      kill(child_pid, SIGCONT);
      child_stopped = 0;

    }

    /* In parent process: write PID to pipe, then wait for child. */

    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) {

      write_error("write to afl-fuzz");
      _exit(1);

    }

    if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) {

      write_error("waitpid");
      _exit(1);

    }

    /* In persistent mode, the child stops itself with SIGSTOP to indicate
       a successful run. In this case, we want to wake it up without forking
       again. */

    if (WIFSTOPPED(status)) child_stopped = 1;

    /* Relay wait status to pipe, then loop back. */

    if (write(FORKSRV_FD + 1, &status, 4) != 4) {

      write_error("writing to afl-fuzz");
      _exit(1);

    }

  }

}

/* A simplified persistent mode handler, used as explained in
 * README.llvm.md. */

int __afl_persistent_loop(unsigned int max_cnt) {

  static u8  first_pass = 1;
  static u32 cycle_cnt;

  if (first_pass) {

    /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate.
       On subsequent calls, the parent will take care of that, but on the first
       iteration, it's our job to erase any trace of whatever happened
       before the loop. */

    if (is_persistent) {

      memset(__afl_area_ptr, 0, __afl_map_size);
      __afl_area_ptr[0] = 1;
      memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));

    }

    cycle_cnt = max_cnt;
    first_pass = 0;
    __afl_selective_coverage_temp = 1;

    return 1;

  }

  if (is_persistent) {

    if (--cycle_cnt) {

      raise(SIGSTOP);

      __afl_area_ptr[0] = 1;
      memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
      __afl_selective_coverage_temp = 1;

      return 1;

    } else {

      /* When exiting __AFL_LOOP(), make sure that the subsequent code that
         follows the loop is not traced. We do that by pivoting back to the
         dummy output region. */

      __afl_area_ptr = __afl_area_ptr_dummy;

    }

  }

  return 0;

}
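
/* Typical __AFL_LOOP() harness driving the handler above - a minimal sketch
   using the macros documented in README.persistent_mode.md (process_input()
   is a hypothetical target function):

     __AFL_FUZZ_INIT();

     int main(void) {

       __AFL_INIT();
       unsigned char *buf = __AFL_FUZZ_TESTCASE_BUF;

       while (__AFL_LOOP(10000)) {

         int len = __AFL_FUZZ_TESTCASE_LEN;
         process_input(buf, len);

       }

       return 0;

     }

   Each raise(SIGSTOP) above hands control back to afl-fuzz, which writes the
   next input into the shared buffer and resumes us with SIGCONT. */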

/* This one can be called from user code when deferred forkserver mode
    is enabled. */

void __afl_manual_init(void) {

  static u8 init_done;

  if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) {

    init_done = 1;
    is_persistent = 0;
    __afl_sharedmem_fuzzing = 0;
    if (__afl_area_ptr == NULL) __afl_area_ptr = __afl_area_ptr_dummy;

    if (__afl_debug) {

      fprintf(stderr,
              "DEBUG: disabled instrumentation because of "
              "AFL_DISABLE_LLVM_INSTRUMENTATION\n");

    }

  }

  if (!init_done) {

    __afl_start_forkserver();
    init_done = 1;

  }

}
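
/* Deferred forkserver sketch (assumes the documented __AFL_INIT() macro from
   README.persistent_mode.md, which calls __afl_manual_init() when the AFL++
   compilers define __AFL_HAVE_MANUAL_CONTROL; setup_once() and fuzz_one()
   are hypothetical):

     int main(int argc, char **argv) {

       setup_once();        // expensive initialization, runs only once
   #ifdef __AFL_HAVE_MANUAL_CONTROL
       __AFL_INIT();        // fork point: every execution starts here
   #endif
       return fuzz_one(argv[1]);

     }

 */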

/* Initialization of the forkserver - latest possible */

__attribute__((constructor())) void __afl_auto_init(void) {

  if (__afl_already_initialized_init) { return; }

#ifdef __ANDROID__
  // Disable handlers in linker/debuggerd, check include/debuggerd/handler.h
  signal(SIGABRT, SIG_DFL);
  signal(SIGBUS, SIG_DFL);
  signal(SIGFPE, SIG_DFL);
  signal(SIGILL, SIG_DFL);
  signal(SIGSEGV, SIG_DFL);
  signal(SIGSTKFLT, SIG_DFL);
  signal(SIGSYS, SIG_DFL);
  signal(SIGTRAP, SIG_DFL);
#endif

  __afl_already_initialized_init = 1;

  if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;

  if (getenv(DEFER_ENV_VAR)) return;

  __afl_manual_init();

}

/* Optionally run an early forkserver */

__attribute__((constructor(EARLY_FS_PRIO))) void __early_forkserver(void) {

  if (getenv("AFL_EARLY_FORKSERVER")) { __afl_auto_init(); }

}

/* Initialization of the shmem - earliest possible because of LTO fixed mem. */

__attribute__((constructor(CTOR_PRIO))) void __afl_auto_early(void) {

  is_persistent = !!getenv(PERSIST_ENV_VAR);

  if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;

  __afl_map_shm();

}

/* preset __afl_area_ptr #2 */

__attribute__((constructor(1))) void __afl_auto_second(void) {

  if (__afl_already_initialized_second) return;
  __afl_already_initialized_second = 1;

  if (getenv("AFL_DEBUG")) {

    __afl_debug = 1;
    fprintf(stderr, "DEBUG: debug enabled\n");

  }

  if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
  u8 *ptr;

  if (__afl_final_loc) {

    if (__afl_area_ptr && __afl_area_ptr != __afl_area_initial)
      free(__afl_area_ptr);

    if (__afl_map_addr)
      ptr = (u8 *)mmap((void *)__afl_map_addr, __afl_final_loc,
                       PROT_READ | PROT_WRITE,
                       MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    else
      ptr = (u8 *)malloc(__afl_final_loc);

    if (ptr && (ssize_t)ptr != -1) {

      __afl_area_ptr = ptr;
      __afl_area_ptr_backup = __afl_area_ptr;

    }

  }

}  // ptr memleak report is a false positive

/* preset __afl_area_ptr #1 - at constructor level 0 global variables have
   not been set */

__attribute__((constructor(0))) void __afl_auto_first(void) {

  if (__afl_already_initialized_first) return;
  __afl_already_initialized_first = 1;

  if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
  u8 *ptr = (u8 *)malloc(MAP_INITIAL_SIZE);

  if (ptr && (ssize_t)ptr != -1) {

    __afl_area_ptr = ptr;
    __afl_area_ptr_backup = __afl_area_ptr;

  }

}  // ptr memleak report is a false positive

/* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard.
   It remains non-operational in the traditional, plugin-backed LLVM mode.
   For more info about 'trace-pc-guard', see README.llvm.md.

   The first function (__sanitizer_cov_trace_pc_guard) is called back on every
   edge (as opposed to every basic block). */

void __sanitizer_cov_trace_pc_guard(uint32_t *guard) {

  // For stability analysis, if you want to know to which function unstable
  // edge IDs belong - uncomment, recompile+install llvm_mode, recompile
  // the target. libunwind and libbacktrace are better solutions.
  // Set AFL_DEBUG_CHILD=1 and run afl-fuzz with 2>file to capture
  // the backtrace output
  /*
  uint32_t unstable[] = { ... unstable edge IDs };
  uint32_t idx;
  void    *bt[256];
  for (idx = 0; idx < sizeof(unstable) / sizeof(uint32_t); idx++) {

    if (unstable[idx] == __afl_area_ptr[*guard]) {

      int bt_size = backtrace(bt, 256);
      if (bt_size > 0) {

        char **bt_syms = backtrace_symbols(bt, bt_size);
        if (bt_syms) {

          fprintf(stderr, "DEBUG: edge=%u caller=%s\n", unstable[idx],
                  bt_syms[0]);
          free(bt_syms);

        }

      }

    }

  }

  */

#if (LLVM_VERSION_MAJOR < 9)

  __afl_area_ptr[*guard]++;

#else

  __afl_area_ptr[*guard] =
      __afl_area_ptr[*guard] + 1 + (__afl_area_ptr[*guard] == 255 ? 1 : 0);

#endif

}
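
/* With -fsanitize-coverage=trace-pc-guard, the compiler places one u32 guard
   variable per edge and emits a call to the function above at each of them,
   roughly:

     __sanitizer_cov_trace_pc_guard(&guard_for_this_edge);

   so once the _init callback below has assigned IDs to the guards, every
   edge execution bumps one counter byte in __afl_area_ptr. */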

/* Init callback. Populates instrumentation IDs. Note that we're using an
   ID of 0 as a special value to indicate non-instrumented bits. That may
   still touch the bitmap, but in a fairly harmless way. */

void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {

  u32   inst_ratio = 100;
  char *x;

  _is_sancov = 1;

  if (__afl_debug) {

    fprintf(stderr,
            "Running __sanitizer_cov_trace_pc_guard_init: %p-%p (%lu edges) "
            "after_fs=%u\n",
            start, stop, (unsigned long)(stop - start),
            __afl_already_initialized_forkserver);

  }

  if (start == stop || *start) return;

  // If a dlopen of an instrumented library happens after the forkserver then
  // we have a problem as we cannot increase the coverage map anymore.
  if (__afl_already_initialized_forkserver) {

    if (!getenv("AFL_IGNORE_PROBLEMS")) {

      fprintf(
          stderr,
          "[-] FATAL: forkserver is already up, but an instrumented library "
          "was loaded via dlopen() afterwards. You must AFL_PRELOAD such "
          "libraries to be able to fuzz them or LD_PRELOAD to run outside "
          "of afl-fuzz.\n"
          "To ignore this set AFL_IGNORE_PROBLEMS=1.\n");
      abort();

    } else {

      static u32 offset = 4;

      while (start < stop) {

        *(start++) = offset;
        if (unlikely(++offset >= __afl_final_loc)) { offset = 4; }

      }

    }

  }

  x = getenv("AFL_INST_RATIO");
  if (x) { inst_ratio = (u32)atoi(x); }

  if (!inst_ratio || inst_ratio > 100) {

    fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n");
    abort();

  }

  /* Instrumented code is loaded *after* our forkserver is up. This is a
     problem: we cannot prevent collisions then. :( */
  /*
  if (__afl_already_initialized_forkserver &&
      __afl_final_loc + 1 + stop - start > __afl_map_size) {

    if (__afl_debug) {

      fprintf(stderr, "Warning: new instrumented code after the forkserver!\n");

    }

    __afl_final_loc = 2;

    if (1 + stop - start > __afl_map_size) {

      *(start++) = ++__afl_final_loc;

      while (start < stop) {

        if (R(100) < inst_ratio)
          *start = ++__afl_final_loc % __afl_map_size;
        else
          *start = 4;

        start++;

      }

      return;

    }

  }

  */

  /* Make sure that the first element in the range is always set - we use that
     to avoid duplicate calls (which can happen as an artifact of the underlying
     implementation in LLVM). */

  *(start++) = ++__afl_final_loc;

  while (start < stop) {

    if (R(100) < inst_ratio)
      *start = ++__afl_final_loc;
    else
      *start = 4;

    start++;

  }

  if (__afl_debug) {

    fprintf(stderr,
            "Done __sanitizer_cov_trace_pc_guard_init: __afl_final_loc = %u\n",
            __afl_final_loc);

  }

  if (__afl_already_initialized_shm && __afl_final_loc > __afl_map_size) {

    if (__afl_debug) {

      fprintf(stderr, "Reinit shm necessary (+%u)\n",
              __afl_final_loc - __afl_map_size);

    }

    __afl_unmap_shm();
    __afl_map_shm();

  }

}

///// CmpLog instrumentation

void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2, uint8_t attr) {

  // fprintf(stderr, "hook1 arg0=%02x arg1=%02x attr=%u\n",
  //         (u8) arg1, (u8) arg2, attr);

  if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
    hits = 0;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = 0;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

  }

  __afl_cmp_map->headers[k].attribute = attr;

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}
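
/* All __cmplog_ins_hook* variants record into the same structure: the
   caller's return address is hashed into a slot k, headers[k] keeps the
   running hit count, the operand size ("shape", bytes minus one) and the
   comparison attribute, and the operand pair lands in the ring buffer
   log[k][hits & (CMP_MAP_H - 1)]. afl-fuzz later replays these observed
   operand values into the input during its cmplog (Redqueen-style) stage. */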

void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2, uint8_t attr) {

  if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
    hits = 0;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = 1;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (!__afl_cmp_map->headers[k].shape) {

      __afl_cmp_map->headers[k].shape = 1;

    }

  }

  __afl_cmp_map->headers[k].attribute = attr;

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}

void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2, uint8_t attr) {

  // fprintf(stderr, "hook4 arg0=%x arg1=%x attr=%u\n", arg1, arg2, attr);

  if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
    hits = 0;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = 3;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < 3) {

      __afl_cmp_map->headers[k].shape = 3;

    }

  }

  __afl_cmp_map->headers[k].attribute = attr;

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}

void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2, uint8_t attr) {

  // fprintf(stderr, "hook8 arg0=%lx arg1=%lx attr=%u\n", arg1, arg2, attr);

  if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
    hits = 0;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = 7;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < 7) {

      __afl_cmp_map->headers[k].shape = 7;

    }

  }

  __afl_cmp_map->headers[k].attribute = attr;

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}

#ifdef WORD_SIZE_64
// support for u24 to u120 via llvm's _ExtInt(). size is in bytes minus 1
void __cmplog_ins_hookN(uint128_t arg1, uint128_t arg2, uint8_t attr,
                        uint8_t size) {

  // fprintf(stderr, "hookN arg0=%llx:%llx arg1=%llx:%llx bytes=%u attr=%u\n",
  // (u64)(arg1 >> 64), (u64)arg1, (u64)(arg2 >> 64), (u64)arg2, size + 1,
  // attr);

  if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
    hits = 0;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = size;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < size) {

      __afl_cmp_map->headers[k].shape = size;

    }

  }

  __afl_cmp_map->headers[k].attribute = attr;

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = (u64)arg1;
  __afl_cmp_map->log[k][hits].v1 = (u64)arg2;

  if (size > 7) {

    __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64);
    __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64);

  }

}

void __cmplog_ins_hook16(uint128_t arg1, uint128_t arg2, uint8_t attr) {

  if (likely(!__afl_cmp_map)) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
    hits = 0;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = 15;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < 15) {

      __afl_cmp_map->headers[k].shape = 15;

    }

  }

  __afl_cmp_map->headers[k].attribute = attr;

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = (u64)arg1;
  __afl_cmp_map->log[k][hits].v1 = (u64)arg2;
  __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64);
  __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64);

}

#endif

void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2) {

  __cmplog_ins_hook1(arg1, arg2, 0);

}

void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2) {

  __cmplog_ins_hook1(arg1, arg2, 0);

}

void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2) {

  __cmplog_ins_hook2(arg1, arg2, 0);

}

void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2) {

  __cmplog_ins_hook2(arg1, arg2, 0);

}

void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2) {

  __cmplog_ins_hook4(arg1, arg2, 0);

}

void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2) {

  __cmplog_ins_hook4(arg1, arg2, 0);

}

void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2) {

  __cmplog_ins_hook8(arg1, arg2, 0);

}

void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2) {

  __cmplog_ins_hook8(arg1, arg2, 0);

}

#ifdef WORD_SIZE_64
void __sanitizer_cov_trace_cmp16(uint128_t arg1, uint128_t arg2) {

  __cmplog_ins_hook16(arg1, arg2, 0);

}

void __sanitizer_cov_trace_const_cmp16(uint128_t arg1, uint128_t arg2) {

  __cmplog_ins_hook16(arg1, arg2, 0);

}

#endif

void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) {

  if (likely(!__afl_cmp_map)) return;

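  /* Per the sanitizer coverage ABI: cases[0] holds the number of case
     values, cases[1] the bit width of the switch operand, and the case
     constants themselves start at cases[2] - hence the i + 2 below. */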
  for (uint64_t i = 0; i < cases[0]; i++) {

    uintptr_t k = (uintptr_t)__builtin_return_address(0) + i;
    k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) &
                    (CMP_MAP_W - 1));

    u32 hits;

    if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {

      __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
      hits = 0;
      __afl_cmp_map->headers[k].hits = 1;
      __afl_cmp_map->headers[k].shape = 7;

    } else {

      hits = __afl_cmp_map->headers[k].hits++;

      if (__afl_cmp_map->headers[k].shape < 7) {

        __afl_cmp_map->headers[k].shape = 7;

      }

    }

    __afl_cmp_map->headers[k].attribute = 1;

    hits &= CMP_MAP_H - 1;
    __afl_cmp_map->log[k][hits].v0 = val;
    __afl_cmp_map->log[k][hits].v1 = cases[i + 2];

  }

}

__attribute__((weak)) void *__asan_region_is_poisoned(void *beg, size_t size) {

  return NULL;

}

// POSIX shenanigans to see if an area is mapped.
// If it is mapped as X-only, we have a problem, so maybe we should add a check
// to avoid calling it on .text addresses.
static int area_is_valid(void *ptr, size_t len) {

  if (unlikely(!ptr || __asan_region_is_poisoned(ptr, len))) { return 0; }

#ifndef __HAIKU__
  long r = syscall(SYS_write, __afl_dummy_fd[1], ptr, len);
#else
  long r = _kern_write(__afl_dummy_fd[1], -1, ptr, len);
#endif  // HAIKU

  if (r <= 0 || r > len) return 0;

  // even if the write succeeds this can be a false positive if we cross
  // a page boundary. who knows why.

  char *p = (char *)ptr;
  long  page_size = sysconf(_SC_PAGE_SIZE);
  char *page = (char *)((uintptr_t)p & ~(page_size - 1)) + page_size;

  if (page > p + len) {

    // no, not crossing a page boundary
    return (int)r;

  } else {

    // yes it crosses a boundary, hence we can only return the length of
    // the rest of the first page; we cannot detect whether the next page is
    // valid or not, neither by SYS_write nor msync() :-(
    return (int)(page - p);

  }

}
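
/* Worked example of the boundary math above (4 KiB pages): for ptr = 0x0ff0
   and len = 31, page = 0x1000, which lies inside [ptr, ptr + len), so only
   the 16 bytes up to the page edge are reported as known-valid - the next
   page may or may not be mapped. */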

/* hook for string with length functions, eg. strncmp, strncasecmp etc.
   Note that we ignore the len parameter and take longer strings if present. */
void __cmplog_rtn_hook_strn(u8 *ptr1, u8 *ptr2, u64 len) {

  // fprintf(stderr, "RTN1 %p %p %u\n", ptr1, ptr2, len);
  if (likely(!__afl_cmp_map)) return;
  if (unlikely(!len)) return;
  int len0 = MIN(len, 31);
  int len1 = strnlen(ptr1, len0);
  if (len1 < 31) len1 = area_is_valid(ptr1, len1 + 1);
  int len2 = strnlen(ptr2, len0);
  if (len2 < 31) len2 = area_is_valid(ptr2, len2 + 1);
  int l = MAX(len1, len2);
  if (l < 2) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = l - 1;
    hits = 0;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < l) {

      __afl_cmp_map->headers[k].shape = l - 1;

    }

  }

  struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
  hits &= CMP_MAP_RTN_H - 1;

  cmpfn[hits].v0_len = 0x80 + l;
  cmpfn[hits].v1_len = 0x80 + l;
  __builtin_memcpy(cmpfn[hits].v0, ptr1, len1);
  __builtin_memcpy(cmpfn[hits].v1, ptr2, len2);
  // fprintf(stderr, "RTN3\n");

}

/* hook for string functions, eg. strcmp, strcasecmp etc. */
void __cmplog_rtn_hook_str(u8 *ptr1, u8 *ptr2) {

  // fprintf(stderr, "RTN1 %p %p\n", ptr1, ptr2);
  if (likely(!__afl_cmp_map)) return;
  if (unlikely(!ptr1 || !ptr2)) return;
  int len1 = strnlen(ptr1, 30) + 1;
  int len2 = strnlen(ptr2, 30) + 1;
  int l = MAX(len1, len2);
  if (l < 3) return;

  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = l - 1;
    hits = 0;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < l) {

      __afl_cmp_map->headers[k].shape = l - 1;

    }

  }

  struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
  hits &= CMP_MAP_RTN_H - 1;

  cmpfn[hits].v0_len = 0x80 + len1;
  cmpfn[hits].v1_len = 0x80 + len2;
  __builtin_memcpy(cmpfn[hits].v0, ptr1, len1);
  __builtin_memcpy(cmpfn[hits].v1, ptr2, len2);
  // fprintf(stderr, "RTN3\n");

}

/* hook function for all other func(ptr, ptr, ...) variants */
void __cmplog_rtn_hook(u8 *ptr1, u8 *ptr2) {

  /*
    u32 i;
    if (area_is_valid(ptr1, 31) <= 0 || area_is_valid(ptr2, 31) <= 0) return;
    fprintf(stderr, "rtn arg0=");
    for (i = 0; i < 32; i++)
      fprintf(stderr, "%02x", ptr1[i]);
    fprintf(stderr, " arg1=");
    for (i = 0; i < 32; i++)
      fprintf(stderr, "%02x", ptr2[i]);
    fprintf(stderr, "\n");
  */

  // fprintf(stderr, "RTN1 %p %p\n", ptr1, ptr2);
  if (likely(!__afl_cmp_map)) return;
  int l1, l2;
  if ((l1 = area_is_valid(ptr1, 31)) <= 0 ||
      (l2 = area_is_valid(ptr2, 31)) <= 0)
    return;
  int len = MIN(31, MIN(l1, l2));

  // fprintf(stderr, "RTN2 %u\n", len);
  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = len - 1;
    hits = 0;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < len) {

      __afl_cmp_map->headers[k].shape = len - 1;

    }

  }

  struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
  hits &= CMP_MAP_RTN_H - 1;

  cmpfn[hits].v0_len = len;
  cmpfn[hits].v1_len = len;
  __builtin_memcpy(cmpfn[hits].v0, ptr1, len);
  __builtin_memcpy(cmpfn[hits].v1, ptr2, len);
  // fprintf(stderr, "RTN3\n");

}
2096 
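/* All of these hooks key their log entry on the call site: the return
   address is hashed down to one of CMP_MAP_W slots. A small sketch of the
   indexing scheme (CMP_MAP_W is a power of two, so the AND acts as a cheap
   modulo; colliding call sites simply share a slot): */
#if 0
  uintptr_t ret  = (uintptr_t)__builtin_return_address(0);
  u32       slot = (u32)(default_hash((u8 *)&ret, sizeof(ret)) & (CMP_MAP_W - 1));
  // each slot then cycles through CMP_MAP_RTN_H operand pairs via `hits`
#endif
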
/* hook for func(ptr, ptr, len, ...) style functions.
   Note that for the time being we ignore len, as it could be wrong
   information, and pass the call on to the standard binary rtn hook */
void __cmplog_rtn_hook_n(u8 *ptr1, u8 *ptr2, u64 len) {

  (void)(len);
  __cmplog_rtn_hook(ptr1, ptr2);

#if 0
  /*
    u32 i;
    if (area_is_valid(ptr1, 31) <= 0 || area_is_valid(ptr2, 31) <= 0) return;
    fprintf(stderr, "rtn_n len=%u arg0=", len);
    for (i = 0; i < len; i++)
      fprintf(stderr, "%02x", ptr1[i]);
    fprintf(stderr, " arg1=");
    for (i = 0; i < len; i++)
      fprintf(stderr, "%02x", ptr2[i]);
    fprintf(stderr, "\n");
  */

  // fprintf(stderr, "RTN1 %p %p %u\n", ptr1, ptr2, len);
  if (likely(!__afl_cmp_map)) return;
  if (unlikely(!len)) return;
  int l = MIN(31, len);

  if ((l = area_is_valid(ptr1, l)) <= 0 || (l = area_is_valid(ptr2, l)) <= 0)
    return;

  // fprintf(stderr, "RTN2 %u\n", l);
  uintptr_t k = (uintptr_t)__builtin_return_address(0);
  k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));

  u32 hits;

  if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {

    __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
    __afl_cmp_map->headers[k].hits = 1;
    __afl_cmp_map->headers[k].shape = l - 1;
    hits = 0;

  } else {

    hits = __afl_cmp_map->headers[k].hits++;

    if (__afl_cmp_map->headers[k].shape < l) {

      __afl_cmp_map->headers[k].shape = l - 1;

    }

  }

  struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
  hits &= CMP_MAP_RTN_H - 1;

  cmpfn[hits].v0_len = l;
  cmpfn[hits].v1_len = l;
  __builtin_memcpy(cmpfn[hits].v0, ptr1, l);
  __builtin_memcpy(cmpfn[hits].v1, ptr2, l);
  // fprintf(stderr, "RTN3\n");
#endif

}

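/* e.g. a memcmp() call site: the compiler pass forwards the length
   argument, but the hook above deliberately drops it and lets
   area_is_valid() bound the copy instead. Illustrative only; buf1, buf2
   and n are invented names: */
#if 0
  // original call in the target:
  int r = memcmp(buf1, buf2, n);
  // what the instrumented code effectively executes first:
  __cmplog_rtn_hook_n((u8 *)buf1, (u8 *)buf2, (u64)n);  // -> __cmplog_rtn_hook()
#endif
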
// gcc libstdc++
// _ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEPKc
static u8 *get_gcc_stdstring(u8 *string) {

  u32 *len = (u32 *)(string + 8);

  if (*len < 16) {  // in structure

    return (string + 16);

  } else {  // in memory

    u8 **ptr = (u8 **)string;
    return (*ptr);

  }

}

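/* For reference, the libstdc++ (C++11 ABI, 64-bit little-endian)
   basic_string layout the helper above relies on. A hedged sketch; the
   field names are approximations, not the real class definition: */
#if 0
  struct gcc_std_string {

    char  *data;                  // offset 0: always points at the chars
    size_t length;                // offset 8: read as a u32 above (LE only)
    union {

      char   sso_buf[16];         // offset 16: in-structure buffer, len < 16
      size_t allocated_capacity;  // used when the data lives on the heap

    };

  };
#endif
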
// llvm libc++ _ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocator
//             IcEEE7compareEmmPKcm
static u8 *get_llvm_stdstring(u8 *string) {

  // length is in: if ((string[0] & 1) == 0) u8 len = (string[0] >> 1);
  // or: if (string[0] & 1) u32 *len = (u32 *) (string + 8);

  if (string[0] & 1) {  // in memory

    u8 **ptr = (u8 **)(string + 16);
    return (*ptr);

  } else {  // in structure

    return (string + 1);

  }

}

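/* The matching libc++ picture (classic ABI, little-endian); again a hedged
   sketch rather than the real class definition: */
#if 0
  struct llvm_std_string_long {   // selected when (string[0] & 1) != 0

    size_t cap;                   // offset 0: capacity, LSB doubles as flag
    size_t size;                  // offset 8
    char  *data;                  // offset 16: dereferenced by the helper

  };

  struct llvm_std_string_short {  // selected when (string[0] & 1) == 0

    unsigned char size;           // offset 0: holds (length << 1), LSB clear
    char          data[23];       // offset 1: in-structure characters

  };
#endif
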
void __cmplog_rtn_gcc_stdstring_cstring(u8 *stdstring, u8 *cstring) {

  if (likely(!__afl_cmp_map)) return;
  if (area_is_valid(stdstring, 32) <= 0 || area_is_valid(cstring, 32) <= 0)
    return;

  __cmplog_rtn_hook(get_gcc_stdstring(stdstring), cstring);

}

void __cmplog_rtn_gcc_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {

  if (likely(!__afl_cmp_map)) return;
  if (area_is_valid(stdstring1, 32) <= 0 || area_is_valid(stdstring2, 32) <= 0)
    return;

  __cmplog_rtn_hook(get_gcc_stdstring(stdstring1),
                    get_gcc_stdstring(stdstring2));

}

void __cmplog_rtn_llvm_stdstring_cstring(u8 *stdstring, u8 *cstring) {

  if (likely(!__afl_cmp_map)) return;
  if (area_is_valid(stdstring, 32) <= 0 || area_is_valid(cstring, 32) <= 0)
    return;

  __cmplog_rtn_hook(get_llvm_stdstring(stdstring), cstring);

}

void __cmplog_rtn_llvm_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {

  if (likely(!__afl_cmp_map)) return;
  if (area_is_valid(stdstring1, 32) <= 0 || area_is_valid(stdstring2, 32) <= 0)
    return;

  __cmplog_rtn_hook(get_llvm_stdstring(stdstring1),
                    get_llvm_stdstring(stdstring2));

}

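/* Illustrative only: the kind of C++ call site these wrappers serve. The
   routines pass matches the mangled compare() symbols quoted above and
   prepends the matching hook call; the names below are invented: */
#if 0
  std::string s = read_input();      // hypothetical harness helper
  if (s == "magic") do_something();
  // the comparison lowers to a libstdc++ compare call, so the pass
  // effectively inserts, right before it:
  __cmplog_rtn_gcc_stdstring_cstring((u8 *)&s, (u8 *)"magic");
#endif
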
/* COVERAGE manipulation features */

// the selective coverage flag is also checked during the shm setup, which
// creates an additional dummy map if __afl_map_size > MAP_SIZE or cmplog
// is used. Especially with cmplog this results in a ~260MB mem increase
// per target run.

// disable coverage from this point onwards until turned on again
void __afl_coverage_off() {

  if (likely(__afl_selective_coverage)) {

    __afl_area_ptr = __afl_area_ptr_dummy;
    __afl_cmp_map = NULL;

  }

}

// enable coverage
void __afl_coverage_on() {

  if (likely(__afl_selective_coverage && __afl_selective_coverage_temp)) {

    __afl_area_ptr = __afl_area_ptr_backup;
    if (__afl_cmp_map_backup) { __afl_cmp_map = __afl_cmp_map_backup; }

  }

}

// discard all coverage up to this point
void __afl_coverage_discard() {

  memset(__afl_area_ptr_backup, 0, __afl_map_size);
  __afl_area_ptr_backup[0] = 1;

  if (__afl_cmp_map) { memset(__afl_cmp_map, 0, sizeof(struct cmp_map)); }

}

// discard the testcase
void __afl_coverage_skip() {

  __afl_coverage_discard();

  if (likely(is_persistent && __afl_selective_coverage)) {

    __afl_coverage_off();
    __afl_selective_coverage_temp = 0;

  } else {

    exit(0);

  }

}

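/* How a target typically drives this API: afl-cc provides __AFL_COVERAGE_*
   convenience macros on top of these functions. A hedged sketch of a
   persistent-mode harness; read_input() and parse_input() are invented: */
#if 0
  __AFL_COVERAGE();                // required once to enable the feature
  __AFL_COVERAGE_START_OFF();      // begin each run with coverage disabled

  int main(void) {

    while (__AFL_LOOP(10000)) {

      read_input();                // setup we do not want measured
      __AFL_COVERAGE_ON();         // measure only the interesting part
      if (parse_input() != 0) __AFL_COVERAGE_SKIP();
      __AFL_COVERAGE_OFF();

    }

    return 0;

  }
#endif
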
// mark this area as especially interesting
void __afl_coverage_interesting(u8 val, u32 id) {

  __afl_area_ptr[id] = val;

}

void __afl_set_persistent_mode(u8 mode) {

  is_persistent = mode;

}

#undef write_error
