1 /*
2 american fuzzy lop++ - bitmap related routines
3 ----------------------------------------------
4
5 Originally written by Michal Zalewski
6
7 Now maintained by Marc Heuse <mh@mh-sec.de>,
8 Heiko Eißfeldt <heiko.eissfeldt@hexco.de> and
9 Andrea Fioraldi <andreafioraldi@gmail.com>
10
11 Copyright 2016, 2017 Google Inc. All rights reserved.
12 Copyright 2019-2022 AFLplusplus Project. All rights reserved.
13
14 Licensed under the Apache License, Version 2.0 (the "License");
15 you may not use this file except in compliance with the License.
16 You may obtain a copy of the License at:
17
18 https://www.apache.org/licenses/LICENSE-2.0
19
20 This is the real deal: the program takes an instrumented binary and
21 attempts a variety of basic fuzzing tricks, paying close attention to
22 how they affect the execution path.
23
24 */
25
26 #include "afl-fuzz.h"
27 #include <limits.h>
28 #if !defined NAME_MAX
29 #define NAME_MAX _XOPEN_NAME_MAX
30 #endif
31
32 /* Write bitmap to file. The bitmap is useful mostly for the secret
33 -B option, to focus a separate fuzzing session on a particular
34 interesting input without rediscovering all the others. */
35
void write_bitmap(afl_state_t *afl) {

  u8  bitmap_path[PATH_MAX];
  s32 bitmap_fd;

  /* Skip the dump entirely unless coverage changed since the last write. */
  if (!afl->bitmap_changed) { return; }
  afl->bitmap_changed = 0;

  snprintf(bitmap_path, PATH_MAX, "%s/fuzz_bitmap", afl->out_dir);

  bitmap_fd =
      open(bitmap_path, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
  if (bitmap_fd < 0) { PFATAL("Unable to open '%s'", bitmap_path); }

  /* Persist the full virgin map; -B sessions reload this file later. */
  ck_write(bitmap_fd, afl->virgin_bits, afl->fsrv.map_size, bitmap_path);

  close(bitmap_fd);

}
54
55 /* Count the number of bits set in the provided bitmap. Used for the status
56 screen several times every second, does not have to be fast. */
57
count_bits(afl_state_t * afl,u8 * mem)58 u32 count_bits(afl_state_t *afl, u8 *mem) {
59
60 u32 *ptr = (u32 *)mem;
61 u32 i = ((afl->fsrv.real_map_size + 3) >> 2);
62 u32 ret = 0;
63
64 while (i--) {
65
66 u32 v = *(ptr++);
67
68 /* This gets called on the inverse, virgin bitmap; optimize for sparse
69 data. */
70
71 if (likely(v == 0xffffffff)) {
72
73 ret += 32;
74 continue;
75
76 }
77
78 v -= ((v >> 1) & 0x55555555);
79 v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
80 ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
81
82 }
83
84 return ret;
85
86 }
87
88 /* Count the number of bytes set in the bitmap. Called fairly sporadically,
89 mostly to update the status screen or calibrate and examine confirmed
90 new paths. */
91
count_bytes(afl_state_t * afl,u8 * mem)92 u32 count_bytes(afl_state_t *afl, u8 *mem) {
93
94 u32 *ptr = (u32 *)mem;
95 u32 i = ((afl->fsrv.real_map_size + 3) >> 2);
96 u32 ret = 0;
97
98 while (i--) {
99
100 u32 v = *(ptr++);
101
102 if (likely(!v)) { continue; }
103 if (v & 0x000000ffU) { ++ret; }
104 if (v & 0x0000ff00U) { ++ret; }
105 if (v & 0x00ff0000U) { ++ret; }
106 if (v & 0xff000000U) { ++ret; }
107
108 }
109
110 return ret;
111
112 }
113
114 /* Count the number of non-255 bytes set in the bitmap. Used strictly for the
115 status screen, several calls per second or so. */
116
count_non_255_bytes(afl_state_t * afl,u8 * mem)117 u32 count_non_255_bytes(afl_state_t *afl, u8 *mem) {
118
119 u32 *ptr = (u32 *)mem;
120 u32 i = ((afl->fsrv.real_map_size + 3) >> 2);
121 u32 ret = 0;
122
123 while (i--) {
124
125 u32 v = *(ptr++);
126
127 /* This is called on the virgin bitmap, so optimize for the most likely
128 case. */
129
130 if (likely(v == 0xffffffffU)) { continue; }
131 if ((v & 0x000000ffU) != 0x000000ffU) { ++ret; }
132 if ((v & 0x0000ff00U) != 0x0000ff00U) { ++ret; }
133 if ((v & 0x00ff0000U) != 0x00ff0000U) { ++ret; }
134 if ((v & 0xff000000U) != 0xff000000U) { ++ret; }
135
136 }
137
138 return ret;
139
140 }
141
/* Destructively simplify trace by eliminating hit count information
   and replacing it with 0x80 or 0x01 depending on whether the tuple
   is hit or not. Called on every new crash or timeout, should be
   reasonably fast. */

/* Lookup table for simplify_trace(): a count of 0 maps to 1 ("not hit"),
   any non-zero count maps to 128 ("hit"). Uses the GCC/Clang range
   designated-initializer extension. */
const u8 simplify_lookup[256] = {

    [0] = 1, [1 ... 255] = 128

};
151
/* Destructively classify execution counts in a trace. This is used as a
   preprocessing step for any newly acquired traces. Called on every exec,
   must be fast. */

/* Buckets raw 8-bit hit counts into power-of-two classes (1, 2, 3, 4-7,
   8-15, 16-31, 32-127, 128+) so that small count jitter between runs does
   not register as new coverage. */
const u8 count_class_lookup8[256] = {

    [0] = 0,
    [1] = 1,
    [2] = 2,
    [3] = 4,
    [4 ... 7] = 8,
    [8 ... 15] = 16,
    [16 ... 31] = 32,
    [32 ... 127] = 64,
    [128 ... 255] = 128

};
169
170 u16 count_class_lookup16[65536];
171
init_count_class16(void)172 void init_count_class16(void) {
173
174 u32 b1, b2;
175
176 for (b1 = 0; b1 < 256; b1++) {
177
178 for (b2 = 0; b2 < 256; b2++) {
179
180 count_class_lookup16[(b1 << 8) + b2] =
181 (count_class_lookup8[b1] << 8) | count_class_lookup8[b2];
182
183 }
184
185 }
186
187 }
188
189 /* Import coverage processing routines. */
190
191 #ifdef WORD_SIZE_64
192 #include "coverage-64.h"
193 #else
194 #include "coverage-32.h"
195 #endif
196
197 /* Check if the current execution path brings anything new to the table.
198 Update virgin bits to reflect the finds. Returns 1 if the only change is
199 the hit-count for a particular tuple; 2 if there are new tuples seen.
200 Updates the map, so subsequent calls will always return 0.
201
202 This function is called after every exec() on a fairly large buffer, so
203 it needs to be fast. We do this in 32-bit and 64-bit flavors. */
204
inline u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {

#ifdef WORD_SIZE_64

  u64 *current = (u64 *)afl->fsrv.trace_bits;
  u64 *virgin = (u64 *)virgin_map;

  u32 i = ((afl->fsrv.real_map_size + 7) >> 3);

#else

  u32 *current = (u32 *)afl->fsrv.trace_bits;
  u32 *virgin = (u32 *)virgin_map;

  u32 i = ((afl->fsrv.real_map_size + 3) >> 2);

#endif                                                     /* ^WORD_SIZE_64 */

  u8 ret = 0;

  /* Walk the trace one machine word at a time; only words with at least
     one hit need the full discover_word() comparison. */
  for (; i; --i, ++current, ++virgin) {

    if (unlikely(*current)) { discover_word(&ret, current, virgin); }

  }

  /* Only the main virgin map feeds the persisted fuzz_bitmap file. */
  if (unlikely(ret) && likely(virgin_map == afl->virgin_bits)) {

    afl->bitmap_changed = 1;

  }

  return ret;

}
239
240 /* A combination of classify_counts and has_new_bits. If 0 is returned, then the
241 * trace bits are kept as-is. Otherwise, the trace bits are overwritten with
242 * classified values.
243 *
244 * This accelerates the processing: in most cases, no interesting behavior
245 * happen, and the trace bits will be discarded soon. This function optimizes
246 * for such cases: one-pass scan on trace bits without modifying anything. Only
247 * on rare cases it fall backs to the slow path: classify_counts() first, then
248 * return has_new_bits(). */
249
inline u8 has_new_bits_unclassified(afl_state_t *afl, u8 *virgin_map) {

  /* Hot path: a read-only skim of the trace bits. If nothing new shows
     up, return without touching the map at all. */
  u8 *end = afl->fsrv.trace_bits + afl->fsrv.map_size;

#ifdef WORD_SIZE_64

  if (!skim((u64 *)virgin_map, (u64 *)afl->fsrv.trace_bits, (u64 *)end)) {

    return 0;

  }

#else

  if (!skim((u32 *)virgin_map, (u32 *)afl->fsrv.trace_bits, (u32 *)end)) {

    return 0;

  }

#endif                                                     /* ^WORD_SIZE_64 */

  /* Slow path: classify the counts in place, then run the full check. */
  classify_counts(&afl->fsrv);
  return has_new_bits(afl, virgin_map);

}
270
271 /* Compact trace bytes into a smaller bitmap. We effectively just drop the
272 count information here. This is called only sporadically, for some
273 new paths. */
274
void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) {

  /* One output bit per input byte: set bit i of dst whenever src[i] is
     non-zero, discarding the hit-count information. */
  for (u32 i = 0; i < afl->fsrv.map_size; ++i) {

    if (src[i]) { dst[i >> 3] |= 1 << (i & 7); }

  }

}
287
288 #ifndef SIMPLE_FILES
289
290 /* Construct a file name for a new test case, capturing the operation
291 that led to its discovery. Returns a ptr to afl->describe_op_buf_256. */
292
u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {

  u8 is_timeout = 0;

  /* save_if_interesting() ORs 0x80 into new_bits to flag a timeout case;
     strip it here and remember it for the ",+tout" suffix below. */
  if (new_bits & 0xf0) {

    new_bits -= 0x80;
    is_timeout = 1;

  }

  /* Never allow the caller's limit to exceed the fixed scratch buffer. */
  size_t real_max_len =
      MIN(max_description_len, sizeof(afl->describe_op_buf_256));
  u8 *ret = afl->describe_op_buf_256;

  if (unlikely(afl->syncing_party)) {

    /* Test case imported from another fuzzer instance during sync. */
    sprintf(ret, "sync:%s,src:%06u", afl->syncing_party, afl->syncing_case);

  } else {

    sprintf(ret, "src:%06u", afl->current_entry);

    /* Record the splice partner's queue index, if we are splicing. */
    if (afl->splicing_with >= 0) {

      sprintf(ret + strlen(ret), "+%06d", afl->splicing_with);

    }

    sprintf(ret + strlen(ret), ",time:%llu,execs:%llu",
            get_cur_time() + afl->prev_run_time - afl->start_time,
            afl->fsrv.total_execs);

    if (afl->current_custom_fuzz &&
        afl->current_custom_fuzz->afl_custom_describe) {

      /* We are currently in a custom mutator that supports afl_custom_describe,
       * use it! */

      size_t len_current = strlen(ret);
      ret[len_current++] = ',';
      ret[len_current] = '\0';

      /* Remaining budget for the custom description, reserving space for
         the ",+cov" (and possibly ",+tout") suffixes appended below. */
      ssize_t size_left = real_max_len - len_current - strlen(",+cov") - 2;
      if (is_timeout) { size_left -= strlen(",+tout"); }
      if (unlikely(size_left <= 0)) FATAL("filename got too long");

      const char *custom_description =
          afl->current_custom_fuzz->afl_custom_describe(
              afl->current_custom_fuzz->data, size_left);
      if (!custom_description || !custom_description[0]) {

        DEBUGF("Error getting a description from afl_custom_describe");
        /* Take the stage name as description fallback */
        sprintf(ret + len_current, "op:%s", afl->stage_short);

      } else {

        /* We got a proper custom description, use it */
        strncat(ret + len_current, custom_description, size_left);

      }

    } else {

      /* Normal testcase descriptions start here */
      sprintf(ret + strlen(ret), ",op:%s", afl->stage_short);

      /* Deterministic stages record the position (and value) mutated;
         a negative stage_cur_byte means a havoc-style stacked stage. */
      if (afl->stage_cur_byte >= 0) {

        sprintf(ret + strlen(ret), ",pos:%d", afl->stage_cur_byte);

        if (afl->stage_val_type != STAGE_VAL_NONE) {

          sprintf(ret + strlen(ret), ",val:%s%+d",
                  (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "",
                  afl->stage_cur_val);

        }

      } else {

        sprintf(ret + strlen(ret), ",rep:%d", afl->stage_cur_val);

      }

    }

  }

  if (is_timeout) { strcat(ret, ",+tout"); }

  /* new_bits == 2 means brand new tuples, not just changed hit counts. */
  if (new_bits == 2) { strcat(ret, ",+cov"); }

  /* Final sanity check against the caller-supplied limit. */
  if (unlikely(strlen(ret) >= max_description_len))
    FATAL("describe string is too long");

  return ret;

}
393
394 #endif /* !SIMPLE_FILES */
395
396 /* Write a message accompanying the crash directory :-) */
397
/* Drop a README.txt into the crashes/ directory, explaining how to
   reproduce and report the finds. Errors are deliberately ignored:
   this is purely informational. */

void write_crash_readme(afl_state_t *afl) {

  u8    fn[PATH_MAX];
  s32   fd;
  FILE *f;

  u8 val_buf[STRINGIFY_VAL_SIZE_MAX];

  /* Bounded write — consistent with the snprintf-based path construction
     used elsewhere in this file (write_bitmap, save_if_interesting). */
  snprintf(fn, PATH_MAX, "%s/crashes/README.txt", afl->out_dir);

  /* O_EXCL: only the very first crash actually creates the file. */
  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);

  /* Do not die on errors here - that would be impolite. */

  if (unlikely(fd < 0)) { return; }

  f = fdopen(fd, "w");

  if (unlikely(!f)) {

    close(fd);
    return;

  }

  fprintf(
      f,
      "Command line used to find this crash:\n\n"

      "%s\n\n"

      "If you can't reproduce a bug outside of afl-fuzz, be sure to set the "
      "same\n"
      "memory limit. The limit used for this fuzzing session was %s.\n\n"

      "Need a tool to minimize test cases before investigating the crashes or "
      "sending\n"
      "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"

      "Found any cool bugs in open-source tools using afl-fuzz? If yes, please "
      "post\n"
      "to https://github.com/AFLplusplus/AFLplusplus/issues/286 once the "
      "issues\n"
      " are fixed :)\n\n",

      afl->orig_cmdline,
      stringify_mem_size(val_buf, sizeof(val_buf),
                         afl->fsrv.mem_limit << 20)); /* ignore errors */

  /* fclose() also closes the underlying fd. */
  fclose(f);

}
450
451 /* Check if the result of an execve() during routine fuzzing is interesting,
452 save or queue the input test case for further analysis if so. Returns 1 if
453 entry is saved, 0 otherwise. */
454
u8 __attribute__((hot))
save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {

  /* Empty inputs are never worth saving. */
  if (unlikely(len == 0)) { return 0; }

  u8  fn[PATH_MAX];
  u8 *queue_fn = "";
  /* classified tracks whether trace_bits already went through
     classify_counts(); is_timeout is ORed into new_bits for describe_op(). */
  u8  new_bits = 0, keeping = 0, res, classified = 0, is_timeout = 0;
  s32 fd;
  u64 cksum = 0;

  /* Update path frequency. */

  /* Generating a hash on every input is super expensive. Bad idea and should
     only be used for special schedules */
  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {

    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

    /* Saturated increment */
    if (afl->n_fuzz[cksum % N_FUZZ_SIZE] < 0xFFFFFFFF)
      afl->n_fuzz[cksum % N_FUZZ_SIZE]++;

  }

  /* In normal mode crash_mode is 0, so this branch handles clean runs;
     in crash exploration mode it handles crashing runs instead. */
  if (likely(fault == afl->crash_mode)) {

    /* Keep only if there are new bits in the map, add to queue for
       future fuzzing, etc. */

    new_bits = has_new_bits_unclassified(afl, afl->virgin_bits);

    if (likely(!new_bits)) {

      if (unlikely(afl->crash_mode)) { ++afl->total_crashes; }
      return 0;

    }

    /* Non-zero new_bits implies the trace was classified in place. */
    classified = new_bits;

  save_to_queue:

#ifndef SIMPLE_FILES

    queue_fn =
        alloc_printf("%s/queue/id:%06u,%s", afl->out_dir, afl->queued_items,
                     describe_op(afl, new_bits + is_timeout,
                                 NAME_MAX - strlen("id:000000,")));

#else

    queue_fn =
        alloc_printf("%s/queue/id_%06u", afl->out_dir, afl->queued_items);

#endif                                                    /* ^!SIMPLE_FILES */

    fd = open(queue_fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
    if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", queue_fn); }
    ck_write(fd, mem, len, queue_fn);
    close(fd);
    add_to_queue(afl, queue_fn, len, 0);

#ifdef INTROSPECTION
    if (afl->custom_mutators_count && afl->current_custom_fuzz) {

      LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

        if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

          const char *ptr = el->afl_custom_introspection(el->data);

          if (ptr != NULL && *ptr != 0) {

            fprintf(afl->introspection_file, "QUEUE CUSTOM %s = %s\n", ptr,
                    afl->queue_top->fname);

          }

        }

      });

    } else if (afl->mutation[0] != 0) {

      fprintf(afl->introspection_file, "QUEUE %s = %s\n", afl->mutation,
              afl->queue_top->fname);

    }

#endif

    /* new_bits == 2: brand new tuples, not just changed hit counts. */
    if (new_bits == 2) {

      afl->queue_top->has_new_cov = 1;
      ++afl->queued_with_cov;

    }

    /* AFLFast schedule? update the new queue entry */
    if (cksum) {

      afl->queue_top->n_fuzz_entry = cksum % N_FUZZ_SIZE;
      afl->n_fuzz[afl->queue_top->n_fuzz_entry] = 1;

    }

    /* due to classify counts we have to recalculate the checksum */
    afl->queue_top->exec_cksum =
        hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

    /* Try to calibrate inline; this also calls update_bitmap_score() when
       successful. */

    res = calibrate_case(afl, afl->queue_top, mem, afl->queue_cycle - 1, 0);

    if (unlikely(res == FSRV_RUN_ERROR)) {

      FATAL("Unable to execute target application");

    }

    if (likely(afl->q_testcase_max_cache_size)) {

      queue_testcase_store_mem(afl, afl->queue_top, mem);

    }

    keeping = 1;

  }

  switch (fault) {

    case FSRV_RUN_TMOUT:

      /* Timeouts are not very interesting, but we're still obliged to keep
         a handful of samples. We use the presence of new bits in the
         hang-specific bitmap as a signal of uniqueness. In "non-instrumented"
         mode, we just keep everything. */

      ++afl->total_tmouts;

      if (afl->saved_hangs >= KEEP_UNIQUE_HANG) { return keeping; }

      if (likely(!afl->non_instrumented_mode)) {

        if (!classified) {

          classify_counts(&afl->fsrv);
          classified = 1;

        }

        /* Compare hit/no-hit only (see simplify_lookup) against the
           timeout-specific virgin map. */
        simplify_trace(afl, afl->fsrv.trace_bits);

        if (!has_new_bits(afl, afl->virgin_tmout)) { return keeping; }

      }

      /* Flag for describe_op() so the filename carries ",+tout". */
      is_timeout = 0x80;
#ifdef INTROSPECTION
      if (afl->custom_mutators_count && afl->current_custom_fuzz) {

        LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

          if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

            const char *ptr = el->afl_custom_introspection(el->data);

            if (ptr != NULL && *ptr != 0) {

              fprintf(afl->introspection_file,
                      "UNIQUE_TIMEOUT CUSTOM %s = %s\n", ptr,
                      afl->queue_top->fname);

            }

          }

        });

      } else if (afl->mutation[0] != 0) {

        fprintf(afl->introspection_file, "UNIQUE_TIMEOUT %s\n", afl->mutation);

      }

#endif

      /* Before saving, we make sure that it's a genuine hang by re-running
         the target with a more generous timeout (unless the default timeout
         is already generous). */

      if (afl->fsrv.exec_tmout < afl->hang_tmout) {

        u8 new_fault;
        len = write_to_testcase(afl, &mem, len, 0);
        new_fault = fuzz_run_target(afl, &afl->fsrv, afl->hang_tmout);
        classify_counts(&afl->fsrv);

        /* A corner case that one user reported bumping into: increasing the
           timeout actually uncovers a crash. Make sure we don't discard it if
           so. */

        if (!afl->stop_soon && new_fault == FSRV_RUN_CRASH) {

          goto keep_as_crash;

        }

        /* Not a reproducible hang: optionally keep it as a queue entry
           (AFL_KEEP_TIMEOUTS), otherwise drop it. */
        if (afl->stop_soon || new_fault != FSRV_RUN_TMOUT) {

          if (afl->afl_env.afl_keep_timeouts) {

            ++afl->saved_tmouts;
            goto save_to_queue;

          } else {

            return keeping;

          }

        }

      }

#ifndef SIMPLE_FILES

      snprintf(fn, PATH_MAX, "%s/hangs/id:%06llu,%s", afl->out_dir,
               afl->saved_hangs,
               describe_op(afl, 0, NAME_MAX - strlen("id:000000,")));

#else

      snprintf(fn, PATH_MAX, "%s/hangs/id_%06llu", afl->out_dir,
               afl->saved_hangs);

#endif                                                    /* ^!SIMPLE_FILES */

      ++afl->saved_hangs;

      afl->last_hang_time = get_cur_time();

      break;

    case FSRV_RUN_CRASH:

    keep_as_crash:

      /* This is handled in a manner roughly similar to timeouts,
         except for slightly different limits and no need to re-run test
         cases. */

      ++afl->total_crashes;

      if (afl->saved_crashes >= KEEP_UNIQUE_CRASH) { return keeping; }

      if (likely(!afl->non_instrumented_mode)) {

        if (!classified) { classify_counts(&afl->fsrv); }

        /* Hit/no-hit comparison against the crash-specific virgin map. */
        simplify_trace(afl, afl->fsrv.trace_bits);

        if (!has_new_bits(afl, afl->virgin_crash)) { return keeping; }

      }

      /* First unique crash ever: drop the README into crashes/. */
      if (unlikely(!afl->saved_crashes)) { write_crash_readme(afl); }

#ifndef SIMPLE_FILES

      snprintf(fn, PATH_MAX, "%s/crashes/id:%06llu,sig:%02u,%s", afl->out_dir,
               afl->saved_crashes, afl->fsrv.last_kill_signal,
               describe_op(afl, 0, NAME_MAX - strlen("id:000000,sig:00,")));

#else

      snprintf(fn, PATH_MAX, "%s/crashes/id_%06llu_%02u", afl->out_dir,
               afl->saved_crashes, afl->fsrv.last_kill_signal);

#endif                                                    /* ^!SIMPLE_FILES */

      ++afl->saved_crashes;
#ifdef INTROSPECTION
      if (afl->custom_mutators_count && afl->current_custom_fuzz) {

        LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

          if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

            const char *ptr = el->afl_custom_introspection(el->data);

            if (ptr != NULL && *ptr != 0) {

              fprintf(afl->introspection_file, "UNIQUE_CRASH CUSTOM %s = %s\n",
                      ptr, afl->queue_top->fname);

            }

          }

        });

      } else if (afl->mutation[0] != 0) {

        fprintf(afl->introspection_file, "UNIQUE_CRASH %s\n", afl->mutation);

      }

#endif
      if (unlikely(afl->infoexec)) {

        // if the user wants to be informed on new crashes - do that
#if !TARGET_OS_IPHONE
        // we dont care if system errors, but we dont want a
        // compiler warning either
        // See
        // https://stackoverflow.com/questions/11888594/ignoring-return-values-in-c
        (void)(system(afl->infoexec) + 1);
#else
        WARNF("command execution unsupported");
#endif

      }

      afl->last_crash_time = get_cur_time();
      afl->last_crash_execs = afl->fsrv.total_execs;

      break;

    case FSRV_RUN_ERROR:
      FATAL("Unable to execute target application");

    default:
      return keeping;

  }

  /* If we're here, we apparently want to save the crash or hang
     test case, too. */

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
  if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", fn); }
  ck_write(fd, mem, len, fn);
  close(fd);

#ifdef __linux__
  if (afl->fsrv.nyx_mode && fault == FSRV_RUN_CRASH) {

    /* Nyx mode: save the hypervisor's auxiliary crash log alongside
       the test case. */
    u8 fn_log[PATH_MAX];

    (void)(snprintf(fn_log, PATH_MAX, "%s.log", fn) + 1);
    fd = open(fn_log, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
    if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", fn_log); }

    u32 nyx_aux_string_len = afl->fsrv.nyx_handlers->nyx_get_aux_string(
        afl->fsrv.nyx_runner, afl->fsrv.nyx_aux_string, 0x1000);

    ck_write(fd, afl->fsrv.nyx_aux_string, nyx_aux_string_len, fn_log);
    close(fd);

  }

#endif

  return keeping;

}
824
825