1 // Copyright 2015 syzkaller project authors. All rights reserved.
2 // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
3
4 #include <fcntl.h>
5 #include <signal.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <sys/ioctl.h>
9 #include <sys/mman.h>
10 #include <sys/prctl.h>
11 #include <sys/syscall.h>
12 #include <unistd.h>
13
// kcov ioctl commands. The INIT_TRACE argument type depends on kernel
// bitness (uint32 vs uint64 trace size), which changes the encoded ioctl
// number — hence two variants selected at runtime in cover_open().
#define KCOV_INIT_TRACE32 _IOR('c', 1, uint32)
#define KCOV_INIT_TRACE64 _IOR('c', 1, uint64)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)

// Collection modes passed as the KCOV_ENABLE ioctl argument.
const unsigned long KCOV_TRACE_PC = 0; // trace coverage PCs
const unsigned long KCOV_TRACE_CMP = 1; // trace comparison operands

// Returns true if the running kernel is 64-bit (defined below).
static bool detect_kernel_bitness();
23
os_init(int argc,char ** argv,void * data,size_t data_size)24 static void os_init(int argc, char** argv, void* data, size_t data_size)
25 {
26 prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
27 is_kernel_64_bit = detect_kernel_bitness();
28 if (mmap(data, data_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0) != data)
29 fail("mmap of data segment failed");
30 }
31
32 static __thread cover_t* current_cover;
33
execute_syscall(const call_t * c,long a[kMaxArgs])34 static long execute_syscall(const call_t* c, long a[kMaxArgs])
35 {
36 if (c->call)
37 return c->call(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]);
38 return syscall(c->sys_nr, a[0], a[1], a[2], a[3], a[4], a[5]);
39 }
40
cover_open(cover_t * cov)41 static void cover_open(cover_t* cov)
42 {
43 int fd = open("/sys/kernel/debug/kcov", O_RDWR);
44 if (fd == -1)
45 fail("open of /sys/kernel/debug/kcov failed");
46 if (dup2(fd, cov->fd) < 0)
47 fail("filed to dup2(%d, %d) cover fd", fd, cov->fd);
48 close(fd);
49 const int kcov_init_trace = is_kernel_64_bit ? KCOV_INIT_TRACE64 : KCOV_INIT_TRACE32;
50 if (ioctl(cov->fd, kcov_init_trace, kCoverSize))
51 fail("cover init trace write failed");
52 size_t mmap_alloc_size = kCoverSize * (is_kernel_64_bit ? 8 : 4);
53 cov->data = (char*)mmap(NULL, mmap_alloc_size,
54 PROT_READ | PROT_WRITE, MAP_SHARED, cov->fd, 0);
55 if (cov->data == MAP_FAILED)
56 fail("cover mmap failed");
57 cov->data_end = cov->data + mmap_alloc_size;
58 }
59
cover_enable(cover_t * cov,bool collect_comps)60 static void cover_enable(cover_t* cov, bool collect_comps)
61 {
62 int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
63 // This should be fatal,
64 // but in practice ioctl fails with assorted errors (9, 14, 25),
65 // so we use exitf.
66 if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode))
67 exitf("cover enable write trace failed, mode=%d", kcov_mode);
68 current_cover = cov;
69 }
70
cover_reset(cover_t * cov)71 static void cover_reset(cover_t* cov)
72 {
73 if (cov == 0)
74 cov = current_cover;
75 *(uint64*)cov->data = 0;
76 }
77
// Snapshots the number of coverage entries the kernel has written so far
// into cov->size.
static void cover_collect(cover_t* cov)
{
	// Note: this assumes little-endian kernel.
	cov->size = *(uint32*)cov->data;
}
83
// 32-bit kernel PCs are not filtered: every PC is accepted.
static bool cover_check(uint32 pc)
{
	return true;
}
88
cover_check(uint64 pc)89 static bool cover_check(uint64 pc)
90 {
91 #if defined(__i386__) || defined(__x86_64__)
92 // Text/modules range for x86_64.
93 return pc >= 0xffffffff80000000ull && pc < 0xffffffffff000000ull;
94 #else
95 return true;
96 #endif
97 }
98
detect_kernel_bitness()99 static bool detect_kernel_bitness()
100 {
101 if (sizeof(void*) == 8)
102 return true;
103 // It turns out to be surprisingly hard to understand if the kernel underneath is 64-bits.
104 // A common method is to look at uname.machine. But it is produced in some involved ways,
105 // and we will need to know about all strings it returns and in the end it can be overriden
106 // during build and lie (and there are known precedents of this).
107 // So instead we look at size of addresses in /proc/kallsyms.
108 bool wide = true;
109 int fd = open("/proc/kallsyms", O_RDONLY);
110 if (fd != -1) {
111 char buf[16];
112 if (read(fd, buf, sizeof(buf)) == sizeof(buf) &&
113 (buf[8] == ' ' || buf[8] == '\t'))
114 wide = false;
115 close(fd);
116 }
117 debug("detected %d-bit kernel\n", wide ? 64 : 32);
118 return wide;
119 }
120
// One does not simply exit.
// _exit can in fact fail.
// syzkaller did manage to generate a seccomp filter that prohibits exit_group syscall.
// Previously, we get into infinite recursion via segv_handler in such case
// and corrupted output_data, which does matter in our case since it is shared
// with fuzzer process. Loop infinitely instead. Parent will kill us.
// But one does not simply loop either. Compilers are sure that _exit never returns,
// so they remove all code after _exit as dead. Call _exit via volatile indirection.
// And this does not work as well. _exit has own handling of failing exit_group
// in the form of HLT instruction, it will divert control flow from our loop.
// So call the syscall directly.
NORETURN void doexit(int status)
{
	// volatile counter keeps the loop observable to the compiler, so it
	// cannot be deleted as dead code (see the rationale comment above).
	volatile unsigned i;
	syscall(__NR_exit_group, status);
	for (i = 0;; i++) {
	}
}
139