// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use data_model::DataInit;
use std::fmt::{self, Display};
use std::mem::size_of;
use std::sync::atomic::{fence, Ordering};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};

use super::xhci_abi::*;

#[derive(Debug)]
pub enum Error {
    Uninitialized,
    EventRingFull,
    BadEnqueuePointer(GuestAddress),
    BadSegTableIndex(u16),
    BadSegTableAddress(GuestAddress),
    MemoryRead(GuestMemoryError),
    MemoryWrite(GuestMemoryError),
}

type Result<T> = std::result::Result<T, Error>;

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            Uninitialized => write!(f, "event ring is uninitialized"),
            EventRingFull => write!(f, "event ring is full"),
            BadEnqueuePointer(addr) => write!(f, "event ring has a bad enqueue pointer: {}", addr),
            BadSegTableIndex(i) => write!(f, "event ring has a bad seg table index: {}", i),
            BadSegTableAddress(addr) => write!(f, "event ring has a bad seg table addr: {}", addr),
            MemoryRead(e) => write!(f, "event ring cannot read from guest memory: {}", e),
            MemoryWrite(e) => write!(f, "event ring cannot write to guest memory: {}", e),
        }
    }
}

/// Event rings are segmented circular buffers used to pass event TRBs from the xHCI device back to
/// the guest. Each event ring is associated with a single interrupter. See section 4.9.4 of the
/// xHCI specification for more details.
/// This implementation is only for the primary interrupter. Please review the xHCI spec before
/// using it for a secondary interrupter.
pub struct EventRing {
    mem: GuestMemory,
    segment_table_size: u16,
    segment_table_base_address: GuestAddress,
    current_segment_index: u16,
    trb_count: u16,
    enqueue_pointer: GuestAddress,
    dequeue_pointer: GuestAddress,
    producer_cycle_state: bool,
}

impl EventRing {
    /// Create an empty, uninitialized event ring.
    pub fn new(mem: GuestMemory) -> Self {
        EventRing {
            mem,
            segment_table_size: 0,
            segment_table_base_address: GuestAddress(0),
            current_segment_index: 0,
            enqueue_pointer: GuestAddress(0),
            dequeue_pointer: GuestAddress(0),
            trb_count: 0,
            // As specified in xHCI spec 4.9.4, cycle state should be initialized to 1.
            producer_cycle_state: true,
        }
    }

    /// This function implements the left side of xHCI spec, Figure 4-12.
    pub fn add_event(&mut self, mut trb: Trb) -> Result<()> {
        self.check_inited()?;
        if self.is_full()? {
            return Err(Error::EventRingFull);
        }
        // The event is written in two steps to avoid a race condition.
        // The guest kernel uses the cycle bit to check ownership, so the cycle bit must be
        // written last.
        trb.set_cycle(!self.producer_cycle_state);
        self.mem
            .write_obj_at_addr(trb, self.enqueue_pointer)
            .map_err(Error::MemoryWrite)?;

        // Updating the cycle state bit should always happen after updating other parts.
        fence(Ordering::SeqCst);

        trb.set_cycle(self.producer_cycle_state);

        // Offset of the cycle state byte.
        const CYCLE_STATE_OFFSET: usize = 12usize;
        let data = trb.as_slice();
        // A TRB contains 4 dwords; the last one contains the cycle bit.
        let cycle_bit_dword = &data[CYCLE_STATE_OFFSET..];
        let address = self.enqueue_pointer;
        let address = address
            .checked_add(CYCLE_STATE_OFFSET as u64)
            .ok_or(Error::BadEnqueuePointer(self.enqueue_pointer))?;
        self.mem
            .write_all_at_addr(cycle_bit_dword, address)
            .map_err(Error::MemoryWrite)?;

        usb_debug!(
            "event write to pointer {:#x}, trb_count {}, {}",
            self.enqueue_pointer.0,
            self.trb_count,
            trb
        );
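        // Advance the enqueue pointer to the next TRB slot.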
        self.enqueue_pointer = match self.enqueue_pointer.checked_add(size_of::<Trb>() as u64) {
            Some(addr) => addr,
            None => return Err(Error::BadEnqueuePointer(self.enqueue_pointer)),
        };
        self.trb_count -= 1;
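        // When the current segment is exhausted, move to the next segment table entry; wrapping
        // past the last segment toggles the producer cycle state.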
        if self.trb_count == 0 {
            self.current_segment_index += 1;
            if self.current_segment_index == self.segment_table_size {
                self.producer_cycle_state ^= true;
                self.current_segment_index = 0;
            }
            self.load_current_seg_table_entry()?;
        }
        Ok(())
    }

    /// Set segment table size.
    pub fn set_seg_table_size(&mut self, size: u16) -> Result<()> {
        usb_debug!("event ring seg table size is set to {}", size);
        self.segment_table_size = size;
        self.try_reconfigure_event_ring()
    }

    /// Set segment table base addr.
    pub fn set_seg_table_base_addr(&mut self, addr: GuestAddress) -> Result<()> {
        usb_debug!("event ring seg table base addr is set to {:#x}", addr.0);
        self.segment_table_base_address = addr;
        self.try_reconfigure_event_ring()
    }

    /// Set dequeue pointer.
    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
        usb_debug!("event ring dequeue pointer set to {:#x}", addr.0);
        self.dequeue_pointer = addr;
    }

    /// Check if event ring is empty.
    pub fn is_empty(&self) -> bool {
        self.enqueue_pointer == self.dequeue_pointer
    }

    /// The event ring is considered full when there is only space for one last TRB. In this
    /// case, the xHC should write an error TRB and perform additional handling. See the spec,
    /// Figure 4-12, for more details.
    /// For now, we just report the event ring as full and fail (as this is unlikely to happen).
    pub fn is_full(&self) -> Result<bool> {
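        // When only one TRB slot remains in the current segment, the ring is full if the dequeue
        // pointer sits at the base of the next segment; otherwise it is full if the dequeue
        // pointer is exactly one TRB past the enqueue pointer.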
        if self.trb_count == 1 {
            // erst == event ring segment table
            let next_erst_idx = (self.current_segment_index + 1) % self.segment_table_size;
            let erst_entry = self.read_seg_table_entry(next_erst_idx)?;
            Ok(self.dequeue_pointer.0 == erst_entry.get_ring_segment_base_address())
        } else {
            Ok(self.dequeue_pointer.0 == self.enqueue_pointer.0 + size_of::<Trb>() as u64)
        }
    }

    /// Try to init the event ring. Does nothing until both the seg table size and base address
    /// have been set; fails if the segment table entry cannot be read.
    fn try_reconfigure_event_ring(&mut self) -> Result<()> {
        if self.segment_table_size == 0 || self.segment_table_base_address.0 == 0 {
            return Ok(());
        }
        self.load_current_seg_table_entry()
    }

    // Check whether this event ring has been initialized.
    fn check_inited(&self) -> Result<()> {
        if self.segment_table_size == 0
            || self.segment_table_base_address == GuestAddress(0)
            || self.enqueue_pointer == GuestAddress(0)
        {
            return Err(Error::Uninitialized);
        }
        Ok(())
    }

    // Load entry of current seg table.
    fn load_current_seg_table_entry(&mut self) -> Result<()> {
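        // Point the enqueue pointer at the base of the segment described by the current entry
        // and reset the remaining TRB count to the segment size.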
        let entry = self.read_seg_table_entry(self.current_segment_index)?;
        self.enqueue_pointer = GuestAddress(entry.get_ring_segment_base_address());
        self.trb_count = entry.get_ring_segment_size();
        Ok(())
    }

    // Get seg table entry at index.
    fn read_seg_table_entry(&self, index: u16) -> Result<EventRingSegmentTableEntry> {
        let seg_table_addr = self.get_seg_table_addr(index)?;
        // TODO(jkwang) We can refactor GuestMemory to allow in-place memory operation.
        self.mem
            .read_obj_from_addr(seg_table_addr)
            .map_err(Error::MemoryRead)
    }

    // Get seg table addr at index.
    fn get_seg_table_addr(&self, index: u16) -> Result<GuestAddress> {
        if index >= self.segment_table_size {
            return Err(Error::BadSegTableIndex(index));
        }
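        // The entry address is the table base plus index * entry size.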
        self.segment_table_base_address
            .checked_add(((size_of::<EventRingSegmentTableEntry>() as u16) * index) as u64)
            .ok_or(Error::BadSegTableAddress(self.segment_table_base_address))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn test_uninited() {
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm.clone());
        let trb = Trb::new();
        match er.add_event(trb).err().unwrap() {
            Error::Uninitialized => {}
            _ => panic!("unexpected error"),
        }
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
    }

    #[test]
    fn test_event_ring() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm.clone());
        let mut st_entries = [EventRingSegmentTableEntry::new(); 3];
        st_entries[0].set_ring_segment_base_address(0x100);
        st_entries[0].set_ring_segment_size(3);
        st_entries[1].set_ring_segment_base_address(0x200);
        st_entries[1].set_ring_segment_size(3);
        st_entries[2].set_ring_segment_base_address(0x300);
        st_entries[2].set_ring_segment_size(3);
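        // Write the three segment table entries contiguously into guest memory starting at 0x8.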
        gm.write_obj_at_addr(st_entries[0], GuestAddress(0x8))
            .unwrap();
        gm.write_obj_at_addr(
            st_entries[1],
            GuestAddress(0x8 + size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        gm.write_obj_at_addr(
            st_entries[2],
            GuestAddress(0x8 + 2 * size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        // Init the event ring. This must happen after the segment table entries are written.
        er.set_seg_table_size(3).unwrap();
        er.set_seg_table_base_addr(GuestAddress(0x8)).unwrap();
        er.set_dequeue_pointer(GuestAddress(0x100));
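        // The dequeue pointer starts at the base of the first segment, so the ring is empty.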

        let mut trb = Trb::new();

        // Fill first table.
        trb.set_control(1);
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 1);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(2);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 2);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(3);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 3);
        assert_eq!(t.get_cycle(), true);

        // Fill second table.
        trb.set_control(4);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x200)).unwrap();
        assert_eq!(t.get_control(), 4);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(5);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 5);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(6);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 6);
        assert_eq!(t.get_cycle(), true);

        // Fill third table.
        trb.set_control(7);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x300)).unwrap();
        assert_eq!(t.get_control(), 7);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(8);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        // Only one free TRB slot remains, so the ring is considered full.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 8);
        assert_eq!(t.get_cycle(), true);

        // Adding another TRB results in an error.
        match er.add_event(trb.clone()) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue one trb.
        er.set_dequeue_pointer(GuestAddress(0x100 + trb_size));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);

        // Fill the last trb of the third table.
        trb.set_control(9);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        // Only one free TRB slot remains, so the ring is considered full.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 8);
        assert_eq!(t.get_cycle(), true);

        // Adding another TRB results in an error.
        match er.add_event(trb.clone()) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue until empty.
        er.set_dequeue_pointer(GuestAddress(0x100));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), true);

        // Fill first table again.
        trb.set_control(10);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 10);
        // cycle bit should be reversed.
        assert_eq!(t.get_cycle(), false);

        trb.set_control(11);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 11);
        assert_eq!(t.get_cycle(), false);

        trb.set_control(12);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 12);
        assert_eq!(t.get_cycle(), false);
    }
}