// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::num::Wrapping;
use std::sync::atomic::{fence, Ordering};

use sys_util::{error, GuestAddress, GuestMemory};

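// Flag bits for descriptor table entries (virtq_desc.flags in the virtio
// spec): NEXT marks a chain that continues, WRITE marks a device-writable
// buffer, and INDIRECT marks a descriptor referring to an indirect table.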
const VIRTQ_DESC_F_NEXT: u16 = 0x1;
const VIRTQ_DESC_F_WRITE: u16 = 0x2;
#[allow(dead_code)]
const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;

/// An iterator over a single descriptor chain.  Not to be confused with AvailIter,
/// which iterates over the descriptor chain heads in a queue.
pub struct DescIter<'a> {
    next: Option<DescriptorChain<'a>>,
}

impl<'a> DescIter<'a> {
    /// Returns an iterator that only yields the readable descriptors in the chain.
    pub fn readable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
        self.take_while(DescriptorChain::is_read_only)
    }

    /// Returns an iterator that only yields the writable descriptors in the chain.
    pub fn writable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
        self.skip_while(DescriptorChain::is_read_only)
    }
}

impl<'a> Iterator for DescIter<'a> {
    type Item = DescriptorChain<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(current) = self.next.take() {
            self.next = current.next_descriptor();
            Some(current)
        } else {
            None
        }
    }
}

/// A virtio descriptor chain.
#[derive(Clone)]
pub struct DescriptorChain<'a> {
    mem: &'a GuestMemory,
    desc_table: GuestAddress,
    queue_size: u16,
    ttl: u16, // used to prevent infinite chain cycles

    /// Index into the descriptor table
    pub index: u16,

    /// Guest physical address of device specific data
    pub addr: GuestAddress,

    /// Length of device specific data
    pub len: u32,

    /// Includes next, write, and indirect bits
    pub flags: u16,

    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set
    pub next: u16,
}

impl<'a> DescriptorChain<'a> {
    fn checked_new(
        mem: &GuestMemory,
        desc_table: GuestAddress,
        queue_size: u16,
        index: u16,
    ) -> Option<DescriptorChain> {
        if index >= queue_size {
            return None;
        }

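        // Each descriptor table entry is 16 bytes: the buffer's guest address
        // (u64 at offset 0), its length (u32 at offset 8), flags (u16 at
        // offset 12), and the next descriptor's index (u16 at offset 14).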
        let desc_head = match mem.checked_offset(desc_table, (index as u64) * 16) {
            Some(a) => a,
            None => return None,
        };
        // These reads can't fail unless Guest memory is hopelessly broken.
        let addr = GuestAddress(mem.read_obj_from_addr::<u64>(desc_head).unwrap() as u64);
        if mem.checked_offset(desc_head, 16).is_none() {
            return None;
        }
        let len: u32 = mem.read_obj_from_addr(desc_head.unchecked_add(8)).unwrap();
        let flags: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(12)).unwrap();
        let next: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(14)).unwrap();
        let chain = DescriptorChain {
            mem,
            desc_table,
            queue_size,
            ttl: queue_size,
            index,
            addr,
            len,
            flags,
            next,
        };

        if chain.is_valid() {
            Some(chain)
        } else {
            None
        }
    }

    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        if self
            .mem
            .checked_offset(self.addr, self.len as u64)
            .is_none()
        {
            false
        } else if self.has_next() && self.next >= self.queue_size {
            false
        } else {
            true
        }
    }

    /// Returns true if this descriptor chain has another descriptor linked after it.
    pub fn has_next(&self) -> bool {
        self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
    }

    /// If the driver designated this as a write only descriptor.
    ///
    /// If this is false, this descriptor is read only.
    /// Write only means the emulated device can write and the driver can read.
    pub fn is_write_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE != 0
    }

    /// If the driver designated this as a read only descriptor.
    ///
    /// If this is false, this descriptor is write only.
    /// Read only means the emulated device can read and the driver can write.
    pub fn is_read_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE == 0
    }

    /// Gets the next descriptor in this descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
    /// the head of the next _available_ descriptor chain.
    pub fn next_descriptor(&self) -> Option<DescriptorChain<'a>> {
        if self.has_next() {
            DescriptorChain::checked_new(self.mem, self.desc_table, self.queue_size, self.next).map(
                |mut c| {
                    c.ttl = self.ttl - 1;
                    c
                },
            )
        } else {
            None
        }
    }

    /// Produces an iterator over all the descriptors in this chain.
    pub fn into_iter(self) -> DescIter<'a> {
        DescIter { next: Some(self) }
    }
}

/// Consuming iterator over all available descriptor chain heads in the queue.
pub struct AvailIter<'a, 'b> {
    mem: &'a GuestMemory,
    queue: &'b mut Queue,
}

impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
    type Item = DescriptorChain<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        self.queue.pop(self.mem)
    }
}

#[derive(Clone)]
/// A virtio queue's parameters.
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,

    /// The queue size in elements the driver selected
    pub size: u16,

    /// Indicates if the queue is finished with configuration
    pub ready: bool,

    /// Guest physical address of the descriptor table
    pub desc_table: GuestAddress,

    /// Guest physical address of the available ring
    pub avail_ring: GuestAddress,

    /// Guest physical address of the used ring
    pub used_ring: GuestAddress,

    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,
}

impl Queue {
    /// Constructs an empty virtio queue with the given `max_size`.
    pub fn new(max_size: u16) -> Queue {
        Queue {
            max_size,
            size: max_size,
            ready: false,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
        }
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn actual_size(&self) -> u16 {
        min(self.size, self.max_size)
    }

    pub fn is_valid(&self, mem: &GuestMemory) -> bool {
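        // Sizes follow the virtio ring layout: 16 bytes per descriptor table
        // entry; the available ring is flags (2) + idx (2) + one u16 index per
        // entry + used_event (2); the used ring is flags (2) + idx (2) + one
        // 8-byte virtq_used_elem per entry + avail_event (2).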
        let queue_size = self.actual_size() as usize;
        let desc_table = self.desc_table;
        let desc_table_size = 16 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = 6 + 2 * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = 6 + 8 * queue_size;
        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
        {
            error!("virtio queue with invalid size: {}", self.size);
            false
        } else if desc_table
            .checked_add(desc_table_size as u64)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.offset(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size as u64)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.offset(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size as u64)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.offset(),
                used_ring_size
            );
            false
        } else {
            true
        }
    }

    /// If a new descriptor chain head is available, returns it and removes it from the queue.
    pub fn pop<'a>(&mut self, mem: &'a GuestMemory) -> Option<DescriptorChain<'a>> {
        if !self.is_valid(mem) {
            return None;
        }

        let queue_size = self.actual_size();
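        // The driver publishes new chains by writing the idx field of the
        // available ring, a u16 at byte offset 2 (just after the flags field).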
        let avail_index_addr = mem.checked_offset(self.avail_ring, 2).unwrap();
        let avail_index: u16 = mem.read_obj_from_addr(avail_index_addr).unwrap();
        let avail_len = Wrapping(avail_index) - self.next_avail;

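        // A well-behaved driver can publish at most queue_size entries at
        // once; a larger difference means the index is corrupt, so treat the
        // queue as empty.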
        if avail_len.0 > queue_size || self.next_avail == Wrapping(avail_index) {
            return None;
        }

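        // Ring entries (u16 descriptor indices) start at byte offset 4 of the
        // available ring, one entry per slot modulo the queue size.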
        let desc_idx_addr_offset = (4 + (self.next_avail.0 % queue_size) * 2) as u64;
        let desc_idx_addr = mem.checked_offset(self.avail_ring, desc_idx_addr_offset)?;

        // This index is checked below in checked_new.
        let descriptor_index: u16 = mem.read_obj_from_addr(desc_idx_addr).unwrap();

        let descriptor_chain =
            DescriptorChain::checked_new(mem, self.desc_table, queue_size, descriptor_index);
        if descriptor_chain.is_some() {
            self.next_avail += Wrapping(1);
        }
        descriptor_chain
    }

    /// A consuming iterator over all available descriptor chain heads offered by the driver.
    pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemory) -> AvailIter<'a, 'b> {
        AvailIter { mem, queue: self }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, mem: &GuestMemory, desc_index: u16, len: u32) {
        if desc_index >= self.actual_size() {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                desc_index
            );
            return;
        }

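        // Used ring entries are virtq_used_elem structs ({ id: u32, len: u32 },
        // 8 bytes each) starting at byte offset 4, after flags and idx.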
        let used_ring = self.used_ring;
        let next_used = (self.next_used.0 % self.actual_size()) as usize;
        let used_elem = used_ring.unchecked_add((4 + next_used * 8) as u64);

        // These writes can't fail as we are guaranteed to be within the descriptor ring.
        mem.write_obj_at_addr(desc_index as u32, used_elem).unwrap();
        mem.write_obj_at_addr(len as u32, used_elem.unchecked_add(4))
            .unwrap();

        self.next_used += Wrapping(1);

        // This fence ensures all descriptor writes are visible before the index update is.
        fence(Ordering::Release);

        mem.write_obj_at_addr(self.next_used.0 as u16, used_ring.unchecked_add(2))
            .unwrap();
    }
}
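
// A minimal usage sketch, not part of the original file: how a device-side
// event loop might drain this queue. `example_drain_queue` is hypothetical;
// real crosvm devices wire `pop`/`add_used` into their own worker loops and
// actually write response data into the guest buffers.
#[allow(dead_code)]
fn example_drain_queue(queue: &mut Queue, mem: &GuestMemory) {
    while let Some(chain) = queue.pop(mem) {
        // Remember the head index; `into_iter` consumes the chain.
        let head_index = chain.index;
        let mut bytes_written = 0u32;
        for desc in chain.into_iter().writable() {
            // A real device would write its response to `desc.addr` in guest
            // memory here; this sketch only tallies the space offered.
            bytes_written += desc.len;
        }
        // Return the chain to the driver, reporting how much was written.
        queue.add_used(mem, head_index, bytes_written);
    }
}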