// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![cfg(not(test))]
#![no_main]

use std::mem::size_of;

use cros_fuzz::fuzz_target;
use cros_fuzz::rand::FuzzRng;
use devices::virtio::DescriptorChain;
use devices::virtio::Queue;
use rand::Rng;
use rand::RngCore;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
// Largest queue size (in descriptors) the fuzzer will exercise; also sizes the
// reference ring layouts below.
const MAX_QUEUE_SIZE: u16 = 256;
// Size in bytes of the guest memory region backing all queue structures (1 MiB).
const MEM_SIZE: u64 = 1024 * 1024;

thread_local! {
    // One guest memory region per fuzzer thread, mapped at guest physical
    // address 0 and spanning MEM_SIZE bytes; reused across fuzz iterations
    // (the fuzz body re-zeroes it each run).
    static GUEST_MEM: GuestMemory = GuestMemory::new(&[(GuestAddress(0), MEM_SIZE)]).unwrap();
}
25 
// These are taken from the virtio spec and can be used as a reference for the size calculations in
// the fuzzer. They are never instantiated in this file — only their sizes are used — and the
// snake_case names intentionally match the spec's struct names.
#[repr(C, packed)]
struct virtq_desc {
    addr: u64,  // guest-physical address of the buffer
    len: u32,   // length of the buffer in bytes
    flags: u16, // descriptor flags (e.g. NEXT / WRITE per the virtio spec)
    next: u16,  // index of the chained descriptor when the NEXT flag is set
}
35 
// Reference layout of the virtio available ring (driver -> device). Used only
// for size calculations: 2 (flags) + 2 (idx) + 2 * queue_size (ring) +
// 2 (used_event) bytes.
#[repr(C, packed)]
struct virtq_avail {
    flags: u16,
    idx: u16,
    // One ring entry per descriptor, sized for the largest queue this fuzzer uses.
    ring: [u16; MAX_QUEUE_SIZE as usize],
    used_event: u16,
}
43 
// Reference layout of a single used-ring element (virtio spec).
#[repr(C, packed)]
struct virtq_used_elem {
    id: u32,  // head index of the used descriptor chain
    len: u32, // total bytes written into the chain's device-writable buffers
}
49 
// Reference layout of the virtio used ring (device -> driver). Used only for
// size calculations: 2 (flags) + 2 (idx) + queue_size * size_of(virtq_used_elem)
// (ring) + 2 (avail_event) bytes.
#[repr(C, packed)]
struct virtq_used {
    flags: u16,
    idx: u16,
    // One used element per descriptor, sized for the largest queue this fuzzer uses.
    ring: [virtq_used_elem; MAX_QUEUE_SIZE as usize],
    avail_event: u16,
}
57 
58 fuzz_target!(|data: &[u8]| {
59     let mut q = Queue::new(MAX_QUEUE_SIZE);
60     let mut rng = FuzzRng::new(data);
61     q.set_size(rng.gen());
62 
63     // For each of {desc_table,avail_ring,used_ring} generate a random address that includes enough
64     // space to hold the relevant struct with the largest possible queue size.
65     let max_table_size = MAX_QUEUE_SIZE as u64 * size_of::<virtq_desc>() as u64;
66     q.set_desc_table(GuestAddress(rng.gen_range(0..MEM_SIZE - max_table_size)));
67     q.set_avail_ring(GuestAddress(
68         rng.gen_range(0..MEM_SIZE - size_of::<virtq_avail>() as u64),
69     ));
70     q.set_used_ring(GuestAddress(
71         rng.gen_range(0..MEM_SIZE - size_of::<virtq_used>() as u64),
72     ));
73     q.set_ready(true);
74 
75     GUEST_MEM.with(|mem| {
76         let mut q = if let Ok(q) = q.activate() {
77             q
78         } else {
79             return;
80         };
81 
82         // First zero out all of the memory.
83         let vs = mem
84             .get_slice_at_addr(GuestAddress(0), MEM_SIZE as usize)
85             .unwrap();
86         vs.write_bytes(0);
87 
88         // Fill in the descriptor table.
89         let queue_size = q.size() as usize;
90         let mut buf = vec![0u8; queue_size * size_of::<virtq_desc>()];
91 
92         rng.fill_bytes(&mut buf[..]);
93         mem.write_all_at_addr(&buf[..], q.desc_table()).unwrap();
94 
95         // Fill in the available ring. See the definition of virtq_avail above for the source of
96         // these numbers.
97         let avail_size = 4 + (queue_size * 2) + 2;
98         buf.resize(avail_size, 0);
99         rng.fill_bytes(&mut buf[..]);
100         mem.write_all_at_addr(&buf[..], q.avail_ring()).unwrap();
101 
102         // Fill in the used ring. See the definition of virtq_used above for the source of
103         // these numbers.
104         let used_size = 4 + (queue_size * size_of::<virtq_used_elem>()) + 2;
105         buf.resize(used_size, 0);
106         rng.fill_bytes(&mut buf[..]);
107         mem.write_all_at_addr(&buf[..], q.used_ring()).unwrap();
108 
109         while let Some(avail_desc) = q.pop(mem) {
110             let idx = avail_desc.index;
111             let total = avail_desc
112                 .into_iter()
113                 .filter(DescriptorChain::is_write_only)
114                 .try_fold(0u32, |sum, cur| sum.checked_add(cur.len));
115             if let Some(len) = total {
116                 q.add_used(mem, idx, len);
117             } else {
118                 q.add_used(mem, idx, 0);
119             }
120         }
121     });
122 });
123