/*
   american fuzzy lop++ - target execution related routines
   --------------------------------------------------------

   Originally written by Michal Zalewski

   Now maintained by Marc Heuse <mh@mh-sec.de>,
   Heiko Eißfeldt <heiko.eissfeldt@hexco.de> and
   Andrea Fioraldi <andreafioraldi@gmail.com> and
   Dominik Maier <mail@dmnk.co>

   Copyright 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019-2022 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     https://www.apache.org/licenses/LICENSE-2.0

   This is the real deal: the program takes an instrumented binary and
   attempts a variety of basic fuzzing tricks, paying close attention to
   how they affect the execution path.

 */

#include "afl-fuzz.h"
#include <sys/time.h>
#include <signal.h>
#include <limits.h>
#if !defined NAME_MAX
  #define NAME_MAX _XOPEN_NAME_MAX
#endif

#include "cmplog.h"

#ifdef PROFILING
u64 time_spent_working = 0;
#endif

/* Execute target application, monitoring for timeouts. Return status
   information. The called program will update afl->fsrv.trace_bits. */

fsrv_run_result_t __attribute__((hot))
fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv, u32 timeout) {

#ifdef PROFILING
  static u64 time_spent_start = 0;
  struct timespec spec;
  if (time_spent_start) {

    u64 current;
    clock_gettime(CLOCK_REALTIME, &spec);
    current = (spec.tv_sec * 1000000000) + spec.tv_nsec;
    time_spent_working += (current - time_spent_start);

  }

#endif
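
  /* Note: under PROFILING, time_spent_working accumulates the wall-clock time
     the fuzzer spends *between* two target executions; the target run below is
     excluded, since time_spent_start is re-armed right after
     afl_fsrv_run_target() returns. */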

  fsrv_run_result_t res = afl_fsrv_run_target(fsrv, timeout, &afl->stop_soon);

#ifdef PROFILING
  clock_gettime(CLOCK_REALTIME, &spec);
  time_spent_start = (spec.tv_sec * 1000000000) + spec.tv_nsec;
#endif

  return res;

}

/* Write modified data to file for testing. If afl->fsrv.out_file is set, the
   old file is unlinked and a new one is created. Otherwise, afl->fsrv.out_fd
   is rewound and truncated. */
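
/* For reference: the post-processing hooks invoked below are expected to
   follow the AFL++ custom mutator API (see docs/custom_mutators.md), roughly:

     size_t afl_custom_post_process(void *data, unsigned char *buf,
                                    size_t buf_size, unsigned char **out_buf);

   The hook returns the new length and hands a buffer back via out_buf;
   returning 0 without setting out_buf signals failure. */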

u32 __attribute__((hot))
write_to_testcase(afl_state_t *afl, void **mem, u32 len, u32 fix) {

#ifdef _AFL_DOCUMENT_MUTATIONS
  s32  doc_fd;
  char fn[PATH_MAX];
  snprintf(fn, PATH_MAX, "%s/mutations/%09u:%s", afl->out_dir,
           afl->document_counter++,
           describe_op(afl, 0, NAME_MAX - strlen("000000000:")));

  if ((doc_fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION)) >=
      0) {

    if (write(doc_fd, *mem, len) != len)
      PFATAL("write to mutation file failed: %s", fn);
    close(doc_fd);

  }

#endif
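
  /* _AFL_DOCUMENT_MUTATIONS is a compile-time debugging aid: when enabled,
     every candidate input is additionally dumped to <out_dir>/mutations/
     before being executed. */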

  if (unlikely(afl->custom_mutators_count)) {

    ssize_t new_size = len;
    u8 *new_mem = *mem;
    u8 *new_buf = NULL;

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (el->afl_custom_post_process) {

        new_size =
            el->afl_custom_post_process(el->data, new_mem, new_size, &new_buf);

        if (unlikely(!new_buf || new_size <= 0)) {

          FATAL("Custom_post_process failed (ret: %lu)",
                (long unsigned)new_size);

        }

        new_mem = new_buf;

      }

    });

    if (unlikely(new_size < afl->min_length && !fix)) {

      new_size = afl->min_length;

    } else if (unlikely(new_size > afl->max_length)) {

      new_size = afl->max_length;

    }

    if (new_mem != *mem) {

      *mem = new_mem;

    }

    /* Everything as planned. Use the potentially new data. */
    afl_fsrv_write_to_testcase(&afl->fsrv, *mem, new_size);
    len = new_size;

  } else {

    if (unlikely(len < afl->min_length && !fix)) {

      len = afl->min_length;

    } else if (unlikely(len > afl->max_length)) {

      len = afl->max_length;

    }

    /* No custom mutators; write the data as-is. */
    afl_fsrv_write_to_testcase(&afl->fsrv, *mem, len);

  }

  return len;

}
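
/* A typical call pattern, as used by common_fuzz_stuff() further below:

     len = write_to_testcase(afl, (void **)&out_buf, len, 0);
     fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);

   Note that the returned length can differ from the length passed in if a
   post-processor or the min/max length clamping changed it. */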

/* The same, but with an adjustable gap. Used for trimming. */

static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at,
                           u32 skip_len) {

  s32 fd = afl->fsrv.out_fd;
  u32 tail_len = len - skip_at - skip_len;

  /*
     This memory is used to carry out post-processing (if present) after
     copying the test case with the gap removed. Note that mixing trimming and
     post-processing in this way is fragile and can break.
  */
  u8 *mem_trimmed = afl_realloc(AFL_BUF_PARAM(out_scratch), len - skip_len + 1);
  if (unlikely(!mem_trimmed)) { PFATAL("alloc"); }

  ssize_t new_size = len - skip_len;
  u8 *new_mem = mem;

  bool post_process_skipped = true;

  if (unlikely(afl->custom_mutators_count)) {

    u8 *new_buf = NULL;
    new_mem = mem_trimmed;

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (el->afl_custom_post_process) {

        // We copy into mem_trimmed only if we actually have custom mutators
        // *with* post-processing installed.

        if (post_process_skipped) {

          if (skip_at) { memcpy(mem_trimmed, (u8 *)mem, skip_at); }

          if (tail_len) {

            memcpy(mem_trimmed + skip_at, (u8 *)mem + skip_at + skip_len,
                   tail_len);

          }

          post_process_skipped = false;

        }

        new_size =
            el->afl_custom_post_process(el->data, new_mem, new_size, &new_buf);

        if (unlikely(!new_buf || new_size <= 0)) {

          FATAL("Custom_post_process failed (ret: %lu)",
                (long unsigned)new_size);

        }

        new_mem = new_buf;

      }

    });

  }
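
  /* At this point new_mem/new_size describe the data to deliver: either the
     original buffer that still contains the gap (post_process_skipped == true;
     head and tail are then written separately below), or the gap-free,
     post-processed copy. */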

  if (likely(afl->fsrv.use_shmem_fuzz)) {

    if (!post_process_skipped) {

      // If we did post-processing, copy directly from the new_mem buffer.

      memcpy(afl->fsrv.shmem_fuzz, new_mem, new_size);

    } else {

      memcpy(afl->fsrv.shmem_fuzz, mem, skip_at);

      memcpy(afl->fsrv.shmem_fuzz + skip_at, mem + skip_at + skip_len,
             tail_len);

    }

    *afl->fsrv.shmem_fuzz_len = new_size;

#ifdef _DEBUG
    if (afl->debug) {

      fprintf(
          stderr, "FS crc: %16llx len: %u\n",
          hash64(afl->fsrv.shmem_fuzz, *afl->fsrv.shmem_fuzz_len, HASH_CONST),
          *afl->fsrv.shmem_fuzz_len);
      fprintf(stderr, "SHM :");
      for (u32 i = 0; i < *afl->fsrv.shmem_fuzz_len; i++)
        fprintf(stderr, "%02x", afl->fsrv.shmem_fuzz[i]);
      fprintf(stderr, "\nORIG:");
      for (u32 i = 0; i < *afl->fsrv.shmem_fuzz_len; i++)
        fprintf(stderr, "%02x", (u8)((u8 *)mem)[i]);
      fprintf(stderr, "\n");

    }

#endif

    return;

  } else if (unlikely(!afl->fsrv.use_stdin)) {

    if (unlikely(afl->no_unlink)) {

      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_TRUNC,
                DEFAULT_PERMISSION);

    } else {

      unlink(afl->fsrv.out_file);                         /* Ignore errors. */
      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL,
                DEFAULT_PERMISSION);

    }

    if (fd < 0) { PFATAL("Unable to create '%s'", afl->fsrv.out_file); }

  } else {

    lseek(fd, 0, SEEK_SET);

  }

  if (!post_process_skipped) {

    ck_write(fd, new_mem, new_size, afl->fsrv.out_file);

  } else {

    ck_write(fd, mem, skip_at, afl->fsrv.out_file);

    ck_write(fd, mem + skip_at + skip_len, tail_len, afl->fsrv.out_file);

  }

  if (afl->fsrv.use_stdin) {

    if (ftruncate(fd, new_size)) { PFATAL("ftruncate() failed"); }
    lseek(fd, 0, SEEK_SET);

  } else {

    close(fd);

  }

}

/* Calibrate a new test case. This is done when processing the input directory
   to warn about flaky or otherwise problematic test cases early on; and when
   new paths are discovered to detect variable behavior and so on. */
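
/* In short, calibration runs the unmodified input afl->stage_max times
   (CAL_CYCLES by default, raised to CAL_CYCLES_LONG when variable behavior is
   detected), then records the average execution time in q->exec_us and the
   edge count of the final trace in q->bitmap_size; both feed into
   calculate_score() later on. */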

u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
                  u32 handicap, u8 from_queue) {

  u8 fault = 0, new_bits = 0, var_detected = 0, hnb = 0,
     first_run = (q->exec_cksum == 0);
  u64 start_us, stop_us, diff_us;
  s32 old_sc = afl->stage_cur, old_sm = afl->stage_max;
  u32 use_tmout = afl->fsrv.exec_tmout;
  u8 *old_sn = afl->stage_name;

  if (unlikely(afl->shm.cmplog_mode)) { q->exec_cksum = 0; }

  /* Be a bit more generous about timeouts when resuming sessions, or when
     trying to calibrate already-added finds. This helps avoid trouble due
     to intermittent latency. */

  if (!from_queue || afl->resuming_fuzz) {

    use_tmout = MAX(afl->fsrv.exec_tmout + CAL_TMOUT_ADD,
                    afl->fsrv.exec_tmout * CAL_TMOUT_PERC / 100);

  }
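
  /* Example: with the config.h defaults (CAL_TMOUT_ADD 50, CAL_TMOUT_PERC 125)
     and a 1000 ms exec_tmout, this yields MAX(1050, 1250) = 1250 ms. */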

  ++q->cal_failed;

  afl->stage_name = "calibration";
  afl->stage_max = afl->afl_env.afl_cal_fast ? 3 : CAL_CYCLES;

  /* Make sure the forkserver is up before we do anything, and let's not
     count its spin-up time toward binary calibration. */

  if (!afl->fsrv.fsrv_pid) {

    if (afl->fsrv.cmplog_binary &&
        afl->fsrv.init_child_func != cmplog_exec_child) {

      FATAL("BUG in afl-fuzz detected. Cmplog mode not set correctly.");

    }

    afl_fsrv_start(&afl->fsrv, afl->argv, &afl->stop_soon,
                   afl->afl_env.afl_debug_child);

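    /* If the target advertised shared-memory test case delivery but the
       forkserver handshake did not enable it, tear the shm segment down and
       fall back to file/stdin delivery. */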
    if (afl->fsrv.support_shmem_fuzz && !afl->fsrv.use_shmem_fuzz) {

      afl_shm_deinit(afl->shm_fuzz);
      ck_free(afl->shm_fuzz);
      afl->shm_fuzz = NULL;
      afl->fsrv.support_shmem_fuzz = 0;
      afl->fsrv.shmem_fuzz = NULL;

    }

  }

  /* We need a dummy run if this is LTO + cmplog. */
  if (unlikely(afl->shm.cmplog_mode)) {

    (void)write_to_testcase(afl, (void **)&use_mem, q->len, 1);

    fault = fuzz_run_target(afl, &afl->fsrv, use_tmout);

    /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
       we want to bail out quickly. */

    if (afl->stop_soon || fault != afl->crash_mode) { goto abort_calibration; }

    if (!afl->non_instrumented_mode && !afl->stage_cur &&
        !count_bytes(afl, afl->fsrv.trace_bits)) {

      fault = FSRV_RUN_NOINST;
      goto abort_calibration;

    }

#ifdef INTROSPECTION
    if (unlikely(!q->bitsmap_size)) q->bitsmap_size = afl->bitsmap_size;
#endif

  }

  if (q->exec_cksum) {

    memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
    hnb = has_new_bits(afl, afl->virgin_bits);
    if (hnb > new_bits) { new_bits = hnb; }

  }

  start_us = get_cur_time_us();

  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {

    if (unlikely(afl->debug)) {

      DEBUGF("calibration stage %d/%d\n", afl->stage_cur + 1, afl->stage_max);

    }

    u64 cksum;

    (void)write_to_testcase(afl, (void **)&use_mem, q->len, 1);

    fault = fuzz_run_target(afl, &afl->fsrv, use_tmout);

    /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
       we want to bail out quickly. */

    if (afl->stop_soon || fault != afl->crash_mode) { goto abort_calibration; }

    if (!afl->non_instrumented_mode && !afl->stage_cur &&
        !count_bytes(afl, afl->fsrv.trace_bits)) {

      fault = FSRV_RUN_NOINST;
      goto abort_calibration;

    }

#ifdef INTROSPECTION
    if (unlikely(!q->bitsmap_size)) q->bitsmap_size = afl->bitsmap_size;
#endif

    classify_counts(&afl->fsrv);
    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
    if (q->exec_cksum != cksum) {

      hnb = has_new_bits(afl, afl->virgin_bits);
      if (hnb > new_bits) { new_bits = hnb; }

      if (q->exec_cksum) {

        u32 i;

        for (i = 0; i < afl->fsrv.map_size; ++i) {

          if (unlikely(!afl->var_bytes[i]) &&
              unlikely(afl->first_trace[i] != afl->fsrv.trace_bits[i])) {

            afl->var_bytes[i] = 1;
            // Ignore the variable edge by marking it as fully discovered.
            afl->virgin_bits[i] = 0;

          }

        }

        if (unlikely(!var_detected)) {

          // Note: from_queue seems to only be set during initialization.
          if (afl->afl_env.afl_no_ui || from_queue) {

            WARNF("instability detected during calibration");

          } else if (afl->debug) {

            DEBUGF("instability detected during calibration\n");

          }

        }

        var_detected = 1;
        afl->stage_max =
            afl->afl_env.afl_cal_fast ? CAL_CYCLES : CAL_CYCLES_LONG;

      } else {

        q->exec_cksum = cksum;
        memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);

      }

    }

  }

  if (unlikely(afl->fixed_seed)) {

    diff_us = (u64)(afl->fsrv.exec_tmout - 1) * (u64)afl->stage_max;

  } else {

    stop_us = get_cur_time_us();
    diff_us = stop_us - start_us;
    if (unlikely(!diff_us)) { ++diff_us; }

  }

  afl->total_cal_us += diff_us;
  afl->total_cal_cycles += afl->stage_max;

  /* OK, let's collect some stats about the performance of this test case.
     This is used for fuzzing air time calculations in calculate_score(). */

  if (unlikely(!afl->stage_max)) {

    // Pretty sure this cannot happen, yet scan-build complains.
    FATAL(
        "BUG: stage_max should not be 0 here! Please report this condition.");

  }

  q->exec_us = diff_us / afl->stage_max;
  q->bitmap_size = count_bytes(afl, afl->fsrv.trace_bits);
  q->handicap = handicap;
  q->cal_failed = 0;

  afl->total_bitmap_size += q->bitmap_size;
  ++afl->total_bitmap_entries;

  update_bitmap_score(afl, q);

  /* If this case didn't result in new output from the instrumentation, tell
     parent. This is a non-critical problem, but something to warn the user
     about. */

  if (!afl->non_instrumented_mode && first_run && !fault && !new_bits) {

    fault = FSRV_RUN_NOBITS;

  }

abort_calibration:

  if (new_bits == 2 && !q->has_new_cov) {

    q->has_new_cov = 1;
    ++afl->queued_with_cov;

  }

  /* Mark variable paths. */

  if (var_detected) {

    afl->var_byte_count = count_bytes(afl, afl->var_bytes);

    if (!q->var_behavior) {

      mark_as_variable(afl, q);
      ++afl->queued_variable;

    }

  }

  afl->stage_name = old_sn;
  afl->stage_cur = old_sc;
  afl->stage_max = old_sm;

  if (!first_run) { show_stats(afl); }

  return fault;

}

/* Grab interesting test cases from other fuzzers. */

void sync_fuzzers(afl_state_t *afl) {

  DIR *sd;
  struct dirent *sd_ent;
  u32 sync_cnt = 0, synced = 0, entries = 0;
  u8 path[PATH_MAX + 1 + NAME_MAX];

  sd = opendir(afl->sync_dir);
  if (!sd) { PFATAL("Unable to open '%s'", afl->sync_dir); }

  afl->stage_max = afl->stage_cur = 0;
  afl->cur_depth = 0;

  /* Look at the entries created for every other fuzzer in the sync
     directory. */

  while ((sd_ent = readdir(sd))) {

    u8 qd_synced_path[PATH_MAX], qd_path[PATH_MAX];
    u32 min_accept = 0, next_min_accept = 0;

    s32 id_fd;

    /* Skip dot files and our own output directory. */

    if (sd_ent->d_name[0] == '.' || !strcmp(afl->sync_id, sd_ent->d_name)) {

      continue;

    }

    entries++;

    // Secondary nodes only sync from the main node; the main node syncs from
    // everyone.
    if (likely(afl->is_secondary_node)) {

      sprintf(qd_path, "%s/%s/is_main_node", afl->sync_dir, sd_ent->d_name);
      int res = access(qd_path, F_OK);
      if (unlikely(afl->is_main_node)) {  // an elected temporary main node

        if (likely(res == 0)) {  // there is another main node? downgrade.

          afl->is_main_node = 0;
          sprintf(qd_path, "%s/is_main_node", afl->out_dir);
          unlink(qd_path);

        }

      } else {

        if (likely(res != 0)) { continue; }

      }

    }

    synced++;

    /* Document the attempt to sync to this instance. */

    sprintf(qd_synced_path, "%s/.synced/%s.last", afl->out_dir,
            sd_ent->d_name);
    id_fd =
        open(qd_synced_path, O_RDWR | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
    if (id_fd >= 0) close(id_fd);

    /* Skip anything that doesn't have a queue/ subdirectory. */

    sprintf(qd_path, "%s/%s/queue", afl->sync_dir, sd_ent->d_name);

    struct dirent **namelist = NULL;
    int m = 0, n, o;

    n = scandir(qd_path, &namelist, NULL, alphasort);

    if (n < 1) {

      if (namelist) free(namelist);
      continue;

    }

    /* Retrieve the ID of the last seen test case. */

    sprintf(qd_synced_path, "%s/.synced/%s", afl->out_dir, sd_ent->d_name);

    id_fd = open(qd_synced_path, O_RDWR | O_CREAT, DEFAULT_PERMISSION);

    if (id_fd < 0) { PFATAL("Unable to create '%s'", qd_synced_path); }

    if (read(id_fd, &min_accept, sizeof(u32)) == sizeof(u32)) {

      next_min_accept = min_accept;
      lseek(id_fd, 0, SEEK_SET);

    }
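
    /* The .synced/<fuzzer> file stores the ID of the next test case to pull
       from that fuzzer's queue; it is rewritten with next_min_accept once the
       import loop below has finished. */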

    /* Show stats */

    snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "sync %u", ++sync_cnt);

    afl->stage_name = afl->stage_name_buf;
    afl->stage_cur = 0;
    afl->stage_max = 0;

    /* For every file queued by this fuzzer, parse ID and see if we have
       looked at it before; exec a test case if not. */

    u8 entry[12];
    sprintf(entry, "id:%06u", next_min_accept);

    while (m < n) {

      if (strncmp(namelist[m]->d_name, entry, 9)) {

        m++;

      } else {

        break;

      }

    }

    if (m >= n) { goto close_sync; }  // nothing new

    for (o = m; o < n; o++) {

      s32 fd;
      struct stat st;

      snprintf(path, sizeof(path), "%s/%s", qd_path, namelist[o]->d_name);
      afl->syncing_case = next_min_accept;
      next_min_accept++;

      /* Allow this to fail in case the other fuzzer is resuming or so... */

      fd = open(path, O_RDONLY);

      if (fd < 0) { continue; }

      if (fstat(fd, &st)) { WARNF("fstat() failed"); }

      /* Ignore zero-sized or oversized files. */

      if (st.st_size && st.st_size <= MAX_FILE) {

        u8 fault;
        u8 *mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);

        if (mem == MAP_FAILED) { PFATAL("Unable to mmap '%s'", path); }

        /* See what happens. We rely on save_if_interesting() to catch major
           errors and save the test case. */

        (void)write_to_testcase(afl, (void **)&mem, st.st_size, 1);

        fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);

        if (afl->stop_soon) { goto close_sync; }

        afl->syncing_party = sd_ent->d_name;
        afl->queued_imported +=
            save_if_interesting(afl, mem, st.st_size, fault);
        afl->syncing_party = 0;

        munmap(mem, st.st_size);

      }

      close(fd);

    }

    ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);

  close_sync:
    close(id_fd);
    if (n > 0)
      for (m = 0; m < n; m++)
        free(namelist[m]);
    free(namelist);

  }

  closedir(sd);

  // If we are a secondary and no main node was found to sync with, become
  // the main node ourselves.
  if (unlikely(synced == 0) && likely(entries) &&
      likely(afl->is_secondary_node)) {

    // There is a small race condition here: another secondary may do the same
    // at the same time. If so, the first temporary main node that runs again
    // will demote itself, so this is not an issue.

    afl->is_main_node = 1;
    sprintf(path, "%s/is_main_node", afl->out_dir);
    int fd = open(path, O_CREAT | O_RDWR, 0644);
    if (fd >= 0) { close(fd); }

  }

  if (afl->foreign_sync_cnt) read_foreign_testcases(afl, 0);

  afl->last_sync_time = get_cur_time();
  afl->last_sync_cycle = afl->queue_cycle;

}

/* Trim all new test cases to save cycles when doing deterministic checks. The
   trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
   file size, to keep the stage short and sweet. */
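
/* Worked example, assuming the config.h defaults (TRIM_START_STEPS 16,
   TRIM_END_STEPS 1024, TRIM_MIN_BYTES 4): for a 1000-byte input, next_pow2()
   yields 1024, so trimming starts with MAX(1024 / 16, 4) = 64-byte chunks and
   halves the chunk size down to the 4-byte minimum. */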

u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {

  u32 orig_len = q->len;

  /* Custom mutator trimmer */
  if (afl->custom_mutators_count) {

    u8 trimmed_case = 0;
    bool custom_trimmed = false;

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (el->afl_custom_trim) {

        trimmed_case = trim_case_custom(afl, q, in_buf, el);
        custom_trimmed = true;

      }

    });

    if (orig_len != q->len || custom_trimmed) {

      queue_testcase_retake(afl, q, orig_len);

    }

    if (custom_trimmed) return trimmed_case;

  }

  u8 needs_write = 0, fault = 0;
  u32 trim_exec = 0;
  u32 remove_len;
  u32 len_p2;

  u8 val_bufs[2][STRINGIFY_VAL_SIZE_MAX];

  /* Although the trimmer will be less useful when variable behavior is
     detected, it will still work to some extent, so we don't check for
     this. */

  if (q->len < 5) { return 0; }

  afl->stage_name = afl->stage_name_buf;
  afl->bytes_trim_in += q->len;

  /* Select initial chunk len, starting with large steps. */

  len_p2 = next_pow2(q->len);

  remove_len = MAX(len_p2 / TRIM_START_STEPS, (u32)TRIM_MIN_BYTES);

  /* Continue until the number of steps gets too high or the stepover
     gets too small. */

  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, (u32)TRIM_MIN_BYTES)) {

    u32 remove_pos = remove_len;

    sprintf(afl->stage_name_buf, "trim %s/%s",
            u_stringify_int(val_bufs[0], remove_len),
            u_stringify_int(val_bufs[1], remove_len));

    afl->stage_cur = 0;
    afl->stage_max = q->len / remove_len;

    while (remove_pos < q->len) {

      u32 trim_avail = MIN(remove_len, q->len - remove_pos);
      u64 cksum;

      write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);

      fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);

      if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }

      /* Note that we don't keep track of crashes or hangs here; maybe TODO?
       */

      ++afl->trim_execs;
      classify_counts(&afl->fsrv);
      cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

      /* If the deletion had no impact on the trace, make it permanent. This
         isn't perfect for variable-path inputs, but we're just making a
         best-effort pass, so it's not a big deal if we end up with false
         negatives every now and then. */

      if (cksum == q->exec_cksum) {

        u32 move_tail = q->len - remove_pos - trim_avail;

        q->len -= trim_avail;
        len_p2 = next_pow2(q->len);

        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
                move_tail);

        /* Let's save a clean trace, which will be needed by
           update_bitmap_score once we're done with the trimming stuff. */

        if (!needs_write) {

          needs_write = 1;
          memcpy(afl->clean_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);

        }

      } else {

        remove_pos += remove_len;

      }

      /* Since this can be slow, update the screen every now and then. */

      if (!(trim_exec++ % afl->stats_update_freq)) { show_stats(afl); }
      ++afl->stage_cur;

    }

    remove_len >>= 1;

  }

  /* If we have made changes to in_buf, we also need to update the on-disk
     version of the test case. */

  if (needs_write) {

    s32 fd;

    if (unlikely(afl->no_unlink)) {

      fd = open(q->fname, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);

      if (fd < 0) { PFATAL("Unable to create '%s'", q->fname); }

      u32 written = 0;
      while (written < q->len) {

        /* Resume partial writes at the current offset. */
        ssize_t result = write(fd, in_buf + written, q->len - written);
        if (result > 0) written += result;

      }

    } else {

      unlink(q->fname);                                    /* Ignore errors. */
      fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);

      if (fd < 0) { PFATAL("Unable to create '%s'", q->fname); }

      ck_write(fd, in_buf, q->len, q->fname);

    }

    close(fd);

    queue_testcase_retake_mem(afl, q, in_buf, q->len, orig_len);

    memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size);
    update_bitmap_score(afl, q);

  }

abort_trimming:

  afl->bytes_trim_out += q->len;
  return fault;

}

/* Write a modified test case, run program, process results. Handle
   error conditions, returning 1 if it's time to bail out. This is
   a helper function for fuzz_one(). */

u8 __attribute__((hot))
common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) {

  u8 fault;

  len = write_to_testcase(afl, (void **)&out_buf, len, 0);

  fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);

  if (afl->stop_soon) { return 1; }

  if (fault == FSRV_RUN_TMOUT) {

    if (afl->subseq_tmouts++ > TMOUT_LIMIT) {

      ++afl->cur_skipped_items;
      return 1;

    }

  } else {

    afl->subseq_tmouts = 0;

  }
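
  /* TMOUT_LIMIT (250 by default in config.h) caps how many timeouts in a row
     we tolerate for a single input before giving up on it. */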

  /* Users can hit us with SIGUSR1 to request the current input
     to be abandoned. */

  if (afl->skip_requested) {

    afl->skip_requested = 0;
    ++afl->cur_skipped_items;
    return 1;

  }

  /* This handles FAULT_ERROR for us: */

  afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault);

  if (!(afl->stage_cur % afl->stats_update_freq) ||
      afl->stage_cur + 1 == afl->stage_max) {

    show_stats(afl);

  }

  return 0;

}
