// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use data_model::DataInit;
use std;
use std::fmt::{self, Display};
use std::mem::size_of;
use std::sync::atomic::{fence, Ordering};
use sys_util::{GuestAddress, GuestMemory, GuestMemoryError};

use super::xhci_abi::*;

#[derive(Debug)]
pub enum Error {
    Uninitialized,
    EventRingFull,
    BadEnqueuePointer(GuestAddress),
    BadSegTableIndex(u16),
    BadSegTableAddress(GuestAddress),
    MemoryRead(GuestMemoryError),
    MemoryWrite(GuestMemoryError),
}

type Result<T> = std::result::Result<T, Error>;

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            Uninitialized => write!(f, "event ring is uninitialized"),
            EventRingFull => write!(f, "event ring is full"),
            BadEnqueuePointer(addr) => write!(f, "event ring has a bad enqueue pointer: {}", addr),
            BadSegTableIndex(i) => write!(f, "event ring has a bad seg table index: {}", i),
            BadSegTableAddress(addr) => write!(f, "event ring has a bad seg table addr: {}", addr),
            MemoryRead(e) => write!(f, "event ring cannot read from guest memory: {}", e),
            MemoryWrite(e) => write!(f, "event ring cannot write to guest memory: {}", e),
        }
    }
}

/// Event rings are segmented circular buffers used to pass event TRBs from the xHCI device back to
/// the guest. Each event ring is associated with a single interrupter. See section 4.9.4 of the
/// xHCI specification for more details.
///
/// This implementation is only intended for the primary interrupter. Please review the xHCI spec
/// before using it for a secondary interrupter.
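///
/// A minimal usage sketch; the addresses are illustrative placeholders, and in practice the
/// guest driver programs the segment table and dequeue pointer through the interrupter
/// registers:
///
/// ```ignore
/// let mut event_ring = EventRing::new(guest_memory);
/// // Point the ring at a guest-provided segment table before producing events.
/// event_ring.set_seg_table_size(1)?;
/// event_ring.set_seg_table_base_addr(GuestAddress(0x1000))?;
/// event_ring.set_dequeue_pointer(GuestAddress(0x2000));
/// // Hand an event TRB back to the guest.
/// event_ring.add_event(some_trb)?;
/// ```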
pub struct EventRing {
    mem: GuestMemory,
    segment_table_size: u16,
    segment_table_base_address: GuestAddress,
    // Index of the segment table entry currently being filled.
    current_segment_index: u16,
    // Number of TRB slots remaining in the current segment.
    trb_count: u16,
    enqueue_pointer: GuestAddress,
    dequeue_pointer: GuestAddress,
    // Producer cycle state bit, toggled every time the ring wraps back to the first segment.
    producer_cycle_state: bool,
}

impl EventRing {
    /// Create an empty, uninitialized event ring.
    pub fn new(mem: GuestMemory) -> Self {
        EventRing {
            mem,
            segment_table_size: 0,
            segment_table_base_address: GuestAddress(0),
            current_segment_index: 0,
            enqueue_pointer: GuestAddress(0),
            dequeue_pointer: GuestAddress(0),
            trb_count: 0,
            // As specified in xHCI spec 4.9.4, cycle state should be initialized to 1.
            producer_cycle_state: true,
        }
    }

    /// Add an event TRB to the ring. This function implements the left side of Figure 4-12 in the
    /// xHCI spec.
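    ///
    /// The write is split into two guest memory accesses: the TRB is first written with an
    /// inverted cycle bit, a fence is issued, and only then is the dword containing the cycle
    /// bit rewritten with the correct value, so the guest never observes a half-written TRB
    /// that it appears to own.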
    pub fn add_event(&mut self, mut trb: Trb) -> Result<()> {
        self.check_inited()?;
        if self.is_full()? {
            return Err(Error::EventRingFull);
        }
        // The event TRB is written in two steps to avoid a race condition.
        // The guest kernel uses the cycle bit to check ownership, so the cycle bit must be
        // written last.
        trb.set_cycle(!self.producer_cycle_state);
        self.mem
            .write_obj_at_addr(trb, self.enqueue_pointer)
            .map_err(Error::MemoryWrite)?;

        // Updating the cycle state bit should always happen after updating other parts.
        fence(Ordering::SeqCst);

        trb.set_cycle(self.producer_cycle_state);

        // Offset of the cycle state byte.
        const CYCLE_STATE_OFFSET: usize = 12usize;
        let data = trb.as_slice();
        // A TRB contains 4 dwords; the last one contains the cycle bit.
        let cycle_bit_dword = &data[CYCLE_STATE_OFFSET..];
        let address = self.enqueue_pointer;
        let address = address
            .checked_add(CYCLE_STATE_OFFSET as u64)
            .ok_or(Error::BadEnqueuePointer(self.enqueue_pointer))?;
        self.mem
            .write_all_at_addr(cycle_bit_dword, address)
            .map_err(Error::MemoryWrite)?;

        usb_debug!(
            "event write to pointer {:#x}, trb_count {}, {}",
            self.enqueue_pointer.0,
            self.trb_count,
            trb
        );
        self.enqueue_pointer = match self.enqueue_pointer.checked_add(size_of::<Trb>() as u64) {
            Some(addr) => addr,
            None => return Err(Error::BadEnqueuePointer(self.enqueue_pointer)),
        };
        self.trb_count -= 1;
        if self.trb_count == 0 {
            self.current_segment_index += 1;
            if self.current_segment_index == self.segment_table_size {
                // Wrapped back to the first segment; toggle the producer cycle state.
                self.producer_cycle_state ^= true;
                self.current_segment_index = 0;
            }
            self.load_current_seg_table_entry()?;
        }
        Ok(())
    }

    /// Set segment table size.
    pub fn set_seg_table_size(&mut self, size: u16) -> Result<()> {
        usb_debug!("event ring seg table size is set to {}", size);
        self.segment_table_size = size;
        self.try_reconfigure_event_ring()
    }

    /// Set segment table base addr.
    pub fn set_seg_table_base_addr(&mut self, addr: GuestAddress) -> Result<()> {
        usb_debug!("event ring seg table base addr is set to {:#x}", addr.0);
        self.segment_table_base_address = addr;
        self.try_reconfigure_event_ring()
    }

    /// Set dequeue pointer.
    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
        usb_debug!("event ring dequeue pointer set to {:#x}", addr.0);
        self.dequeue_pointer = addr;
    }

    /// Get the enqueue pointer.
    pub fn get_enqueue_pointer(&self) -> GuestAddress {
        self.enqueue_pointer
    }

    /// Check if event ring is empty.
    pub fn is_empty(&self) -> bool {
        self.enqueue_pointer == self.dequeue_pointer
    }

    /// The event ring is considered full when there is only space for one last TRB. In this case,
    /// the xHC should write an error TRB and perform additional handling; see Figure 4-12 of the
    /// spec for details.
    /// For now, we just report the ring as full and fail (as this is unlikely to happen).
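    ///
    /// Concretely, the ring is full when writing one more TRB would make the enqueue pointer
    /// catch up with the dequeue pointer. With only one slot left in the current segment, the
    /// next TRB would land at the base of the next segment, so fullness is checked against that
    /// base instead of `enqueue_pointer + size_of::<Trb>()`.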
    pub fn is_full(&self) -> Result<bool> {
        if self.trb_count == 1 {
            // erst == event ring segment table
            let next_erst_idx = (self.current_segment_index + 1) % self.segment_table_size;
            let erst_entry = self.read_seg_table_entry(next_erst_idx)?;
            Ok(self.dequeue_pointer.0 == erst_entry.get_ring_segment_base_address())
        } else {
            Ok(self.dequeue_pointer.0 == self.enqueue_pointer.0 + size_of::<Trb>() as u64)
        }
    }

    /// Try to initialize the event ring. Returns without doing anything if the segment table size
    /// or base address has not been set yet; fails if the current segment table entry cannot be
    /// read.
    fn try_reconfigure_event_ring(&mut self) -> Result<()> {
        if self.segment_table_size == 0 || self.segment_table_base_address.0 == 0 {
            return Ok(());
        }
        self.load_current_seg_table_entry()
    }

    // Check if this event ring is inited.
    fn check_inited(&self) -> Result<()> {
        if self.segment_table_size == 0
            || self.segment_table_base_address == GuestAddress(0)
            || self.enqueue_pointer == GuestAddress(0)
        {
            return Err(Error::Uninitialized);
        }
        Ok(())
    }

    // Load entry of current seg table.
    fn load_current_seg_table_entry(&mut self) -> Result<()> {
        let entry = self.read_seg_table_entry(self.current_segment_index)?;
        self.enqueue_pointer = GuestAddress(entry.get_ring_segment_base_address());
        self.trb_count = entry.get_ring_segment_size();
        Ok(())
    }

    // Get seg table entry at index.
    fn read_seg_table_entry(&self, index: u16) -> Result<EventRingSegmentTableEntry> {
        let seg_table_addr = self.get_seg_table_addr(index)?;
        // TODO(jkwang) We can refactor GuestMemory to allow in-place memory operation.
        self.mem
            .read_obj_from_addr(seg_table_addr)
            .map_err(Error::MemoryRead)
    }

    // Get seg table addr at index.
    fn get_seg_table_addr(&self, index: u16) -> Result<GuestAddress> {
        // Valid entries live at indices 0..segment_table_size.
        if index >= self.segment_table_size {
            return Err(Error::BadSegTableIndex(index));
        }
        self.segment_table_base_address
            .checked_add(((size_of::<EventRingSegmentTableEntry>() as u16) * index) as u64)
            .ok_or(Error::BadSegTableAddress(self.segment_table_base_address))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn test_uninited() {
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm.clone());
        let trb = Trb::new();
        match er.add_event(trb).err().unwrap() {
            Error::Uninitialized => {}
            _ => panic!("unexpected error"),
        }
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
    }

    #[test]
    fn test_event_ring() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm.clone());
        let mut st_entries = [EventRingSegmentTableEntry::new(); 3];
        st_entries[0].set_ring_segment_base_address(0x100);
        st_entries[0].set_ring_segment_size(3);
        st_entries[1].set_ring_segment_base_address(0x200);
        st_entries[1].set_ring_segment_size(3);
        st_entries[2].set_ring_segment_base_address(0x300);
        st_entries[2].set_ring_segment_size(3);
        gm.write_obj_at_addr(st_entries[0], GuestAddress(0x8))
            .unwrap();
        gm.write_obj_at_addr(
            st_entries[1],
            GuestAddress(0x8 + size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        gm.write_obj_at_addr(
            st_entries[2],
            GuestAddress(0x8 + 2 * size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        // Initialize the event ring. This must happen after the segment table entries are written.
        er.set_seg_table_size(3).unwrap();
        er.set_seg_table_base_addr(GuestAddress(0x8)).unwrap();
        er.set_dequeue_pointer(GuestAddress(0x100));

        let mut trb = Trb::new();

        // Fill first table.
        trb.set_control(1);
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 1);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(2);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 2);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(3);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 3);
        assert_eq!(t.get_cycle(), true);

        // Fill second table.
        trb.set_control(4);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x200)).unwrap();
        assert_eq!(t.get_control(), 4);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(5);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 5);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(6);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + 2 * trb_size as u64))
            .unwrap();
        assert_eq!(t.get_control(), 6);
        assert_eq!(t.get_cycle(), true);

        // Fill third table.
        trb.set_control(7);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x300)).unwrap();
        assert_eq!(t.get_control(), 7);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(8);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        // There is only one last trb. Considered full.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 8);
        assert_eq!(t.get_cycle(), true);

        // Adding another trb will result in an error since the ring is full.
        match er.add_event(trb.clone()) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue one trb.
        er.set_dequeue_pointer(GuestAddress(0x100 + trb_size));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);

        // Fill the last trb of the third table.
        trb.set_control(9);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        // There is only one last trb. Considered full.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 8);
        assert_eq!(t.get_cycle(), true);

        // Adding another trb will result in an error since the ring is full.
        match er.add_event(trb.clone()) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue until empty.
        er.set_dequeue_pointer(GuestAddress(0x100));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), true);

        // Fill first table again.
        trb.set_control(10);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 10);
        // cycle bit should be reversed.
        assert_eq!(t.get_cycle(), false);

        trb.set_control(11);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 11);
        assert_eq!(t.get_cycle(), false);

        trb.set_control(12);
        assert_eq!(er.add_event(trb.clone()).unwrap(), ());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 12);
        assert_eq!(t.get_cycle(), false);
    }
}