/* SPDX-License-Identifier: MIT */
// https://syzkaller.appspot.com/bug?id=5f5a44abb4cba056fe24255c4fcb7e7bbe13de7a
// autogenerated by syzkaller (https://github.com/google/syzkaller)

#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <linux/futex.h>

#ifdef __NR_futex

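/*
 * Reproducer scaffolding: millisecond sleep, a CLOCK_MONOTONIC timestamp
 * in milliseconds, and a thread spawner that uses a 128KB stack and
 * retries pthread_create() on EAGAIN.
 */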
static void sleep_ms(uint64_t ms)
{
  usleep(ms * 1000);
}

static uint64_t current_time_ms(void)
{
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC, &ts))
    exit(1);
  return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

static void thread_start(void* (*fn)(void*), void* arg)
{
  pthread_t th;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, 128 << 10);
  int i = 0;
  for (; i < 100; i++) {
    if (pthread_create(&th, &attr, fn, arg) == 0) {
      pthread_attr_destroy(&attr);
      return;
    }
    if (errno == EAGAIN) {
      usleep(50);
      continue;
    }
    break;
  }
  exit(1);
}

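/*
 * Minimal futex-based event used to hand work to the worker threads:
 * state 0 = unsignaled, 1 = signaled. event_set() wakes all waiters;
 * event_wait()/event_timedwait() block in FUTEX_WAIT until signaled.
 */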
typedef struct {
  int state;
} event_t;

static void event_init(event_t* ev)
{
  ev->state = 0;
}

static void event_reset(event_t* ev)
{
  ev->state = 0;
}

static void event_set(event_t* ev)
{
  if (ev->state)
    exit(1);
  __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
  syscall(__NR_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
}

static void event_wait(event_t* ev)
{
  while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
    syscall(__NR_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}

static int event_isset(event_t* ev)
{
  return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}

static int event_timedwait(event_t* ev, uint64_t timeout)
{
  uint64_t start = current_time_ms();
  uint64_t now = start;
  for (;;) {
    uint64_t remain = timeout - (now - start);
    struct timespec ts;
    ts.tv_sec = remain / 1000;
    ts.tv_nsec = (remain % 1000) * 1000 * 1000;
    syscall(__NR_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
    if (__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
      return 1;
    now = current_time_ms();
    if (now - start > timeout)
      return 0;
  }
}

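/*
 * Hard-coded byte offsets into the shared io_uring SQ/CQ ring mapping,
 * presumably matching the layout of the kernels this reproducer was
 * generated against; syz_io_uring_submit() uses these instead of the
 * offsets returned in io_uring_params.
 */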
#define SIZEOF_IO_URING_SQE 64
#define SIZEOF_IO_URING_CQE 16
#define SQ_HEAD_OFFSET 0
#define SQ_TAIL_OFFSET 64
#define SQ_RING_MASK_OFFSET 256
#define SQ_RING_ENTRIES_OFFSET 264
#define SQ_FLAGS_OFFSET 276
#define SQ_DROPPED_OFFSET 272
#define CQ_HEAD_OFFSET 128
#define CQ_TAIL_OFFSET 192
#define CQ_RING_MASK_OFFSET 260
#define CQ_RING_ENTRIES_OFFSET 268
#define CQ_RING_OVERFLOW_OFFSET 284
#define CQ_FLAGS_OFFSET 280
#define CQ_CQES_OFFSET 320

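/* Mirrors of the io_uring UAPI structures, declared locally so the
   reproducer builds without <linux/io_uring.h>. */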
struct io_sqring_offsets {
  uint32_t head;
  uint32_t tail;
  uint32_t ring_mask;
  uint32_t ring_entries;
  uint32_t flags;
  uint32_t dropped;
  uint32_t array;
  uint32_t resv1;
  uint64_t resv2;
};

struct io_cqring_offsets {
  uint32_t head;
  uint32_t tail;
  uint32_t ring_mask;
  uint32_t ring_entries;
  uint32_t overflow;
  uint32_t cqes;
  uint64_t resv[2];
};

struct io_uring_params {
  uint32_t sq_entries;
  uint32_t cq_entries;
  uint32_t flags;
  uint32_t sq_thread_cpu;
  uint32_t sq_thread_idle;
  uint32_t features;
  uint32_t resv[4];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_OFF_SQ_RING 0
#define IORING_OFF_SQES 0x10000000ULL

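/*
 * io_uring_setup() wrapper (syscall 425): creates a ring with the given
 * params and maps the SQ/CQ ring and the SQE array at the fixed addresses
 * vma1/vma2, returning the mappings via ring_ptr_out/sqes_ptr_out.
 */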
#define sys_io_uring_setup 425
static long syz_io_uring_setup(volatile long a0, volatile long a1,
                               volatile long a2, volatile long a3,
                               volatile long a4, volatile long a5)
{
  uint32_t entries = (uint32_t)a0;
  struct io_uring_params* setup_params = (struct io_uring_params*)a1;
  void* vma1 = (void*)a2;
  void* vma2 = (void*)a3;
  void** ring_ptr_out = (void**)a4;
  void** sqes_ptr_out = (void**)a5;
  uint32_t fd_io_uring = syscall(sys_io_uring_setup, entries, setup_params);
  uint32_t sq_ring_sz =
      setup_params->sq_off.array + setup_params->sq_entries * sizeof(uint32_t);
  uint32_t cq_ring_sz = setup_params->cq_off.cqes +
                        setup_params->cq_entries * SIZEOF_IO_URING_CQE;
  uint32_t ring_sz = sq_ring_sz > cq_ring_sz ? sq_ring_sz : cq_ring_sz;
  *ring_ptr_out = mmap(vma1, ring_sz, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring,
                       IORING_OFF_SQ_RING);
  uint32_t sqes_sz = setup_params->sq_entries * SIZEOF_IO_URING_SQE;
  *sqes_ptr_out =
      mmap(vma2, sqes_sz, PROT_READ | PROT_WRITE,
           MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring, IORING_OFF_SQES);
  return fd_io_uring;
}

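/*
 * Copies one 64-byte SQE into slot sqes_index, publishes the index in the
 * SQ array (which lives just past the CQEs, rounded up to 64 bytes), and
 * advances the SQ tail with a release store so the kernel sees the entry.
 */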
static long syz_io_uring_submit(volatile long a0, volatile long a1,
                                volatile long a2, volatile long a3)
{
  char* ring_ptr = (char*)a0;
  char* sqes_ptr = (char*)a1;
  char* sqe = (char*)a2;
  uint32_t sqes_index = (uint32_t)a3;
  uint32_t sq_ring_entries = *(uint32_t*)(ring_ptr + SQ_RING_ENTRIES_OFFSET);
  uint32_t cq_ring_entries = *(uint32_t*)(ring_ptr + CQ_RING_ENTRIES_OFFSET);
  uint32_t sq_array_off =
      (CQ_CQES_OFFSET + cq_ring_entries * SIZEOF_IO_URING_CQE + 63) & ~63;
  if (sq_ring_entries)
    sqes_index %= sq_ring_entries;
  char* sqe_dest = sqes_ptr + sqes_index * SIZEOF_IO_URING_SQE;
  memcpy(sqe_dest, sqe, SIZEOF_IO_URING_SQE);
  uint32_t sq_ring_mask = *(uint32_t*)(ring_ptr + SQ_RING_MASK_OFFSET);
  uint32_t* sq_tail_ptr = (uint32_t*)(ring_ptr + SQ_TAIL_OFFSET);
  uint32_t sq_tail = *sq_tail_ptr & sq_ring_mask;
  uint32_t sq_tail_next = *sq_tail_ptr + 1;
  uint32_t* sq_array = (uint32_t*)(ring_ptr + sq_array_off);
  *(sq_array + sq_tail) = sqes_index;
  __atomic_store_n(sq_tail_ptr, sq_tail_next, __ATOMIC_RELEASE);
  return 0;
}

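/*
 * SIGKILLs the child's process group and reaps it; if the child does not
 * exit promptly, aborts all FUSE connections (a common source of stuck
 * processes in syzkaller runs) and waits again, this time blocking.
 */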
static void kill_and_wait(int pid, int* status)
{
  kill(-pid, SIGKILL);
  kill(pid, SIGKILL);
  for (int i = 0; i < 100; i++) {
    if (waitpid(-1, status, WNOHANG | __WALL) == pid)
      return;
    usleep(1000);
  }
  DIR* dir = opendir("/sys/fs/fuse/connections");
  if (dir) {
    for (;;) {
      struct dirent* ent = readdir(dir);
      if (!ent)
        break;
      if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
        continue;
      char abort[300];
      snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
               ent->d_name);
      int fd = open(abort, O_WRONLY);
      if (fd == -1) {
        continue;
      }
      if (write(fd, abort, 1) < 0) {
      }
      close(fd);
    }
    closedir(dir);
  }
  while (waitpid(-1, status, __WALL) != pid) {
  }
}

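/* Per-child setup: die when the parent dies and start a new process group
   so kill_and_wait() can signal the whole group. */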
static void setup_test()
{
  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  setpgrp();
}

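/*
 * Worker-thread pool. Each syscall of the reproducer is dispatched to an
 * idle thread via the ready/done events; `running` counts calls that are
 * still executing.
 */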
struct thread_t {
  int created, call;
  event_t ready, done;
};

static struct thread_t threads[16];
static void execute_call(int call);
static int running;

static void* thr(void* arg)
{
  struct thread_t* th = (struct thread_t*)arg;
  for (;;) {
    event_wait(&th->ready);
    event_reset(&th->ready);
    execute_call(th->call);
    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
    event_set(&th->done);
  }
  return 0;
}

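/*
 * Issues the reproducer's 4 calls in order, each on an idle worker thread
 * with a 50ms wait, then gives stragglers up to ~100ms to finish.
 */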
static void execute_one(void)
{
  int i, call, thread;
  for (call = 0; call < 4; call++) {
    for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
         thread++) {
      struct thread_t* th = &threads[thread];
      if (!th->created) {
        th->created = 1;
        event_init(&th->ready);
        event_init(&th->done);
        event_set(&th->done);
        thread_start(thr, th);
      }
      if (!event_isset(&th->done))
        continue;
      event_reset(&th->done);
      th->call = call;
      __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
      event_set(&th->ready);
      event_timedwait(&th->done, 50);
      break;
    }
  }
  for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
    sleep_ms(1);
}

#define WAIT_FLAGS __WALL

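/*
 * Main fuzzing loop: fork a fresh child for each of 5000 iterations, run
 * the reproducer in the child, and kill the child if it is still alive
 * after 5 seconds.
 */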
static void loop(void)
{
  int iter = 0;
  for (; iter < 5000; iter++) {
    int pid = fork();
    if (pid < 0)
      exit(1);
    if (pid == 0) {
      setup_test();
      execute_one();
      exit(0);
    }
    int status = 0;
    uint64_t start = current_time_ms();
    for (;;) {
      if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
        break;
      sleep_ms(1);
      if (current_time_ms() - start < 5000)
        continue;
      kill_and_wait(pid, &status);
      break;
    }
  }
}

#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 426
#endif

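/*
 * Cross-call results: r[0] = signalfd, r[1] = io_uring fd,
 * r[2] = ring mapping address, r[3] = SQE array mapping address.
 */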
uint64_t r[4] = {0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x0};

static void execute_call(int call)
{
  intptr_t res = 0;
  switch (call) {
  case 0:
    /* signalfd4(-1, empty sigmask, 8, 0): creates the fd polled below */
    *(uint64_t*)0x200000c0 = 0;
    res = syscall(__NR_signalfd4, -1, 0x200000c0ul, 8ul, 0ul);
    if (res != -1)
      r[0] = res;
    break;
  case 1:
    /* io_uring_setup() with 0x87 entries and zeroed params; ring fd and
       the two mapping addresses are saved in r[1]..r[3] */
    *(uint32_t*)0x20000a84 = 0;
    *(uint32_t*)0x20000a88 = 0;
    *(uint32_t*)0x20000a8c = 0;
    *(uint32_t*)0x20000a90 = 0;
    *(uint32_t*)0x20000a98 = -1;
    memset((void*)0x20000a9c, 0, 12);
    res = -1;
    res = syz_io_uring_setup(0x87, 0x20000a80, 0x206d6000, 0x206d7000,
                             0x20000000, 0x20000040);
    if (res != -1) {
      r[1] = res;
      r[2] = *(uint64_t*)0x20000000;
      r[3] = *(uint64_t*)0x20000040;
    }
    break;
  case 2:
    /* build an SQE: opcode 6 (IORING_OP_POLL_ADD) on the signalfd r[0],
       then queue it as SQE index 0 */
    *(uint8_t*)0x20002240 = 6;
    *(uint8_t*)0x20002241 = 0;
    *(uint16_t*)0x20002242 = 0;
    *(uint32_t*)0x20002244 = r[0];
    *(uint64_t*)0x20002248 = 0;
    *(uint64_t*)0x20002250 = 0;
    *(uint32_t*)0x20002258 = 0;
    *(uint16_t*)0x2000225c = 0;
    *(uint16_t*)0x2000225e = 0;
    *(uint64_t*)0x20002260 = 0;
    *(uint16_t*)0x20002268 = 0;
    *(uint16_t*)0x2000226a = 0;
    memset((void*)0x2000226c, 0, 20);
    syz_io_uring_submit(r[2], r[3], 0x20002240, 0);
    break;
  case 3:
    /* io_uring_enter(fd, to_submit=0x1523a, min_complete=0, flags=0,
       sig=NULL, sigsz=0xa): fuzzer-chosen argument values */
    syscall(__NR_io_uring_enter, r[1], 0x1523a, 0, 0ul, 0ul, 0xaul);
    break;
  }
}

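/*
 * Maps the fixed 16MB data region at 0x20000000 (prot 7 = RWX, flags 0x32
 * = MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS) that the hard-coded pointers in
 * execute_call() dereference, with inaccessible guard pages on both sides,
 * then runs the fork loop. On non-x86 targets main() bails out at once.
 */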
int main(int argc, char *argv[])
{
  void *ret;

#if !defined(__i386) && !defined(__x86_64__)
  return 0;
#endif

  if (argc > 1)
    return 0;

  ret = mmap((void *)0x1ffff000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
  if (ret == MAP_FAILED)
    return 0;
  ret = mmap((void *)0x20000000ul, 0x1000000ul, 7ul, 0x32ul, -1, 0ul);
  if (ret == MAP_FAILED)
    return 0;
  ret = mmap((void *)0x21000000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
  if (ret == MAP_FAILED)
    return 0;
  loop();
  return 0;
}

#else /* __NR_futex */

int main(int argc, char *argv[])
{
  return 0;
}

#endif /* __NR_futex */