• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4 
5 #![no_main]
6 
7 use std::mem::size_of;
8 
9 use cros_fuzz::fuzz_target;
10 use cros_fuzz::rand::FuzzRng;
11 use devices::virtio::{DescriptorChain, Queue};
12 use rand::{Rng, RngCore};
13 use vm_memory::{GuestAddress, GuestMemory};
14 
// Largest queue the fuzzer exercises; also sizes the `ring` arrays in the reference structs below.
const MAX_QUEUE_SIZE: u16 = 256;
// Size in bytes of the guest memory region backing the queue (1 MiB, starting at address 0).
const MEM_SIZE: u64 = 1024 * 1024;
17 
thread_local! {
    // One lazily-created guest memory mapping per fuzzer thread: MEM_SIZE bytes
    // at guest physical address 0. Reused across fuzz iterations on the same
    // thread (the fuzz body zeroes it before each run).
    static GUEST_MEM: GuestMemory = GuestMemory::new(&[(GuestAddress(0), MEM_SIZE)]).unwrap();
}
21 
// These are taken from the virtio spec and can be used as a reference for the size calculations in
// the fuzzer. They are never instantiated; only their `size_of` is used.
#[repr(C, packed)]
struct virtq_desc {
    // Guest physical address of the buffer this descriptor refers to.
    addr: u64,
    // Length of that buffer in bytes.
    len: u32,
    // VIRTQ_DESC_F_* flags (NEXT / WRITE / INDIRECT per the virtio spec).
    flags: u16,
    // Index of the next descriptor in the chain, valid when the NEXT flag is set.
    next: u16,
}
31 
// Available-ring layout from the virtio spec; used only for size calculations
// (2 + 2 + 2 * queue_size + 2 bytes for a queue of `queue_size` entries).
#[repr(C, packed)]
struct virtq_avail {
    flags: u16,
    // Driver-incremented index of the next free slot in `ring`.
    idx: u16,
    // One descriptor-chain head index per entry; sized for the largest queue the fuzzer uses.
    ring: [u16; MAX_QUEUE_SIZE as usize],
    // Only present when VIRTIO_F_EVENT_IDX is negotiated, but always counted here.
    used_event: u16,
}
39 
// A single used-ring entry: the head index of a completed descriptor chain
// plus the number of bytes the device wrote into it (8 bytes total).
#[repr(C, packed)]
struct virtq_used_elem {
    id: u32,
    len: u32,
}
45 
// Used-ring layout from the virtio spec; used only for size calculations
// (2 + 2 + 8 * queue_size + 2 bytes for a queue of `queue_size` entries).
#[repr(C, packed)]
struct virtq_used {
    flags: u16,
    // Device-incremented index of the next free slot in `ring`.
    idx: u16,
    ring: [virtq_used_elem; MAX_QUEUE_SIZE as usize],
    // Only present when VIRTIO_F_EVENT_IDX is negotiated, but always counted here.
    avail_event: u16,
}
53 
54 fuzz_target!(|data: &[u8]| {
55     let mut q = Queue::new(MAX_QUEUE_SIZE);
56     let mut rng = FuzzRng::new(data);
57     q.size = rng.gen();
58     q.ready = true;
59 
60     // For each of {desc_table,avail_ring,used_ring} generate a random address that includes enough
61     // space to hold the relevant struct with the largest possible queue size.
62     let max_table_size = MAX_QUEUE_SIZE as u64 * size_of::<virtq_desc>() as u64;
63     q.desc_table = GuestAddress(rng.gen_range(0, MEM_SIZE - max_table_size));
64     q.avail_ring = GuestAddress(rng.gen_range(0, MEM_SIZE - size_of::<virtq_avail>() as u64));
65     q.used_ring = GuestAddress(rng.gen_range(0, MEM_SIZE - size_of::<virtq_used>() as u64));
66 
67     GUEST_MEM.with(|mem| {
68         if !q.is_valid(mem) {
69             return;
70         }
71 
72         // First zero out all of the memory.
73         let vs = mem
74             .get_slice_at_addr(GuestAddress(0), MEM_SIZE as usize)
75             .unwrap();
76         vs.write_bytes(0);
77 
78         // Fill in the descriptor table.
79         let queue_size = q.size as usize;
80         let mut buf = vec![0u8; queue_size * size_of::<virtq_desc>()];
81 
82         rng.fill_bytes(&mut buf[..]);
83         mem.write_all_at_addr(&buf[..], q.desc_table).unwrap();
84 
85         // Fill in the available ring. See the definition of virtq_avail above for the source of
86         // these numbers.
87         let avail_size = 4 + (queue_size * 2) + 2;
88         buf.resize(avail_size, 0);
89         rng.fill_bytes(&mut buf[..]);
90         mem.write_all_at_addr(&buf[..], q.avail_ring).unwrap();
91 
92         // Fill in the used ring. See the definition of virtq_used above for the source of
93         // these numbers.
94         let used_size = 4 + (queue_size * size_of::<virtq_used_elem>()) + 2;
95         buf.resize(used_size, 0);
96         rng.fill_bytes(&mut buf[..]);
97         mem.write_all_at_addr(&buf[..], q.used_ring).unwrap();
98 
99         while let Some(avail_desc) = q.pop(mem) {
100             let idx = avail_desc.index;
101             let total = avail_desc
102                 .into_iter()
103                 .filter(DescriptorChain::is_write_only)
104                 .try_fold(0u32, |sum, cur| sum.checked_add(cur.len));
105             if let Some(len) = total {
106                 q.add_used(mem, idx, len);
107             } else {
108                 q.add_used(mem, idx, 0);
109             }
110         }
111     });
112 });
113