// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use data_model::DataInit;
use remain::sorted;
use std::mem::size_of;
use std::sync::atomic::{fence, Ordering};
use thiserror::Error;
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};

use super::xhci_abi::*;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("event ring has a bad enqueue pointer: {0}")]
    BadEnqueuePointer(GuestAddress),
    #[error("event ring has a bad seg table addr: {0}")]
    BadSegTableAddress(GuestAddress),
    #[error("event ring has a bad seg table index: {0}")]
    BadSegTableIndex(u16),
    #[error("event ring is full")]
    EventRingFull,
    #[error("event ring cannot read from guest memory: {0}")]
    MemoryRead(GuestMemoryError),
    #[error("event ring cannot write to guest memory: {0}")]
    MemoryWrite(GuestMemoryError),
    #[error("event ring is uninitialized")]
    Uninitialized,
}

type Result<T> = std::result::Result<T, Error>;

/// Event rings are segmented circular buffers used to pass event TRBs from the xHCI device back to
/// the guest. Each event ring is associated with a single interrupter. See section 4.9.4 of the
/// xHCI specification for more details.
/// This implementation is only for the primary interrupter. Review the xHCI spec before using it
/// for a secondary interrupter.
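///
/// A minimal usage sketch, mirroring the test at the bottom of this file (it assumes the segment
/// table entries at `0x8` have already been written to guest memory):
///
/// ```ignore
/// let mem = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
/// let mut ring = EventRing::new(mem);
/// ring.set_seg_table_size(3)?;
/// ring.set_seg_table_base_addr(GuestAddress(0x8))?;
/// ring.set_dequeue_pointer(GuestAddress(0x100));
/// ring.add_event(Trb::new())?;
/// ```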
pub struct EventRing {
    mem: GuestMemory,
    segment_table_size: u16,
    segment_table_base_address: GuestAddress,
    current_segment_index: u16,
    trb_count: u16,
    enqueue_pointer: GuestAddress,
    dequeue_pointer: GuestAddress,
    producer_cycle_state: bool,
}

impl EventRing {
    /// Create an empty, uninitialized event ring.
    pub fn new(mem: GuestMemory) -> Self {
        EventRing {
            mem,
            segment_table_size: 0,
            segment_table_base_address: GuestAddress(0),
            current_segment_index: 0,
            enqueue_pointer: GuestAddress(0),
            dequeue_pointer: GuestAddress(0),
            trb_count: 0,
            // As specified in xHCI spec 4.9.4, cycle state should be initialized to 1.
            producer_cycle_state: true,
        }
    }

    /// Add an event TRB to the ring. This implements the left side of xHCI spec, Figure 4-12.
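    ///
    /// The TRB is written in two steps so the guest never observes a half-written event:
    /// everything except the final cycle bit first, then, after a fence, the dword holding the
    /// cycle bit. A sketch of how a consumer could detect a valid TRB (hypothetical names, not
    /// actual guest driver code):
    ///
    /// ```ignore
    /// let trb: Trb = mem.read_obj_from_addr(consumer_dequeue_pointer)?;
    /// if trb.get_cycle() == consumer_cycle_state {
    ///     // Cycle bits match: the TRB is fully written and owned by the consumer.
    /// }
    /// ```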
    pub fn add_event(&mut self, mut trb: Trb) -> Result<()> {
        self.check_inited()?;
        if self.is_full()? {
            return Err(Error::EventRingFull);
        }
        // The event is written in two steps to avoid a race condition: the guest kernel uses the
        // cycle bit to check ownership, so the cycle bit must be written last.
        trb.set_cycle(!self.producer_cycle_state);
        self.mem
            .write_obj_at_addr(trb, self.enqueue_pointer)
            .map_err(Error::MemoryWrite)?;

        // Updating the cycle state bit should always happen after updating other parts.
        fence(Ordering::SeqCst);

        trb.set_cycle(self.producer_cycle_state);

        // Offset of the dword that contains the cycle bit.
        const CYCLE_STATE_OFFSET: usize = 12usize;
        let data = trb.as_slice();
        // A TRB contains 4 dwords; the last one contains the cycle bit.
        let cycle_bit_dword = &data[CYCLE_STATE_OFFSET..];
        let address = self
            .enqueue_pointer
            .checked_add(CYCLE_STATE_OFFSET as u64)
            .ok_or(Error::BadEnqueuePointer(self.enqueue_pointer))?;
        self.mem
            .write_all_at_addr(cycle_bit_dword, address)
            .map_err(Error::MemoryWrite)?;

        usb_debug!(
            "event write to pointer {:#x}, trb_count {}, {}",
            self.enqueue_pointer.0,
            self.trb_count,
            trb
        );
        self.enqueue_pointer = match self.enqueue_pointer.checked_add(size_of::<Trb>() as u64) {
            Some(addr) => addr,
            None => return Err(Error::BadEnqueuePointer(self.enqueue_pointer)),
        };
        self.trb_count -= 1;
        if self.trb_count == 0 {
            // The current segment is exhausted; move on to the next one.
            self.current_segment_index += 1;
            if self.current_segment_index == self.segment_table_size {
                // We wrapped around the whole ring, so flip the producer cycle state.
                self.producer_cycle_state ^= true;
                self.current_segment_index = 0;
            }
            self.load_current_seg_table_entry()?;
        }
        Ok(())
    }

    /// Set segment table size.
    pub fn set_seg_table_size(&mut self, size: u16) -> Result<()> {
        usb_debug!("event ring seg table size is set to {}", size);
        self.segment_table_size = size;
        self.try_reconfigure_event_ring()
    }

    /// Set segment table base addr.
    pub fn set_seg_table_base_addr(&mut self, addr: GuestAddress) -> Result<()> {
        usb_debug!("event ring seg table base addr is set to {:#x}", addr.0);
        self.segment_table_base_address = addr;
        self.try_reconfigure_event_ring()
    }

    /// Set dequeue pointer.
    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
        usb_debug!("event ring dequeue pointer set to {:#x}", addr.0);
        self.dequeue_pointer = addr;
    }

    /// Check if event ring is empty.
    pub fn is_empty(&self) -> bool {
        self.enqueue_pointer == self.dequeue_pointer
    }

    /// The event ring is considered full when there is only space left for one last TRB. In this
    /// case, the xHC should write an Event Ring Full Error TRB and perform additional handling;
    /// see spec, Figure 4-12 for more details.
    /// For now, we just detect the full condition and fail (as it's unlikely to happen).
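    ///
    /// A worked example of the common-case check, using the 16-byte TRB size and addresses from
    /// the test below: with the enqueue pointer at 0x100 and the dequeue pointer at 0x110, the
    /// ring is full because 0x100 + 16 == 0x110.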
    pub fn is_full(&self) -> Result<bool> {
        if self.trb_count == 1 {
            // erst == event ring segment table
            let next_erst_idx = (self.current_segment_index + 1) % self.segment_table_size;
            let erst_entry = self.read_seg_table_entry(next_erst_idx)?;
            Ok(self.dequeue_pointer.0 == erst_entry.get_ring_segment_base_address())
        } else {
            Ok(self.dequeue_pointer.0 == self.enqueue_pointer.0 + size_of::<Trb>() as u64)
        }
    }

    /// Try to initialize the event ring. Does nothing until both the segment table size and base
    /// address have been set; fails if the current segment table entry cannot be loaded.
    fn try_reconfigure_event_ring(&mut self) -> Result<()> {
        if self.segment_table_size == 0 || self.segment_table_base_address.0 == 0 {
            return Ok(());
        }
        self.load_current_seg_table_entry()
    }

    // Check if this event ring is initialized.
    fn check_inited(&self) -> Result<()> {
        if self.segment_table_size == 0
            || self.segment_table_base_address == GuestAddress(0)
            || self.enqueue_pointer == GuestAddress(0)
        {
            return Err(Error::Uninitialized);
        }
        Ok(())
    }

    // Load entry of current seg table.
    fn load_current_seg_table_entry(&mut self) -> Result<()> {
        let entry = self.read_seg_table_entry(self.current_segment_index)?;
        self.enqueue_pointer = GuestAddress(entry.get_ring_segment_base_address());
        self.trb_count = entry.get_ring_segment_size();
        Ok(())
    }

    // Get seg table entry at index.
    fn read_seg_table_entry(&self, index: u16) -> Result<EventRingSegmentTableEntry> {
        let seg_table_addr = self.get_seg_table_addr(index)?;
        // TODO(jkwang) We can refactor GuestMemory to allow in-place memory operation.
        self.mem
            .read_obj_from_addr(seg_table_addr)
            .map_err(Error::MemoryRead)
    }

    // Get seg table addr at index.
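    // For example, with the segment table based at 0x8 (as in the test below) and assuming the
    // 16-byte segment table entry layout from the xHCI spec, entry 2 lives at
    // 0x8 + 2 * 16 = 0x28.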
    fn get_seg_table_addr(&self, index: u16) -> Result<GuestAddress> {
        // Indices are zero-based, so an index equal to the table size is already out of bounds.
        if index >= self.segment_table_size {
            return Err(Error::BadSegTableIndex(index));
        }
        // Do the offset arithmetic in u64 so a large index cannot overflow u16.
        self.segment_table_base_address
            .checked_add(size_of::<EventRingSegmentTableEntry>() as u64 * index as u64)
            .ok_or(Error::BadSegTableAddress(self.segment_table_base_address))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn test_uninited() {
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm);
        let trb = Trb::new();
        match er.add_event(trb).err().unwrap() {
            Error::Uninitialized => {}
            _ => panic!("unexpected error"),
        }
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
    }

    #[test]
    fn test_event_ring() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm.clone());
        let mut st_entries = [EventRingSegmentTableEntry::new(); 3];
        st_entries[0].set_ring_segment_base_address(0x100);
        st_entries[0].set_ring_segment_size(3);
        st_entries[1].set_ring_segment_base_address(0x200);
        st_entries[1].set_ring_segment_size(3);
        st_entries[2].set_ring_segment_base_address(0x300);
        st_entries[2].set_ring_segment_size(3);
        gm.write_obj_at_addr(st_entries[0], GuestAddress(0x8))
            .unwrap();
        gm.write_obj_at_addr(
            st_entries[1],
            GuestAddress(0x8 + size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        gm.write_obj_at_addr(
            st_entries[2],
            GuestAddress(0x8 + 2 * size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        // Initialize the event ring. This must happen after the segment table entries have been
        // written.
        er.set_seg_table_size(3).unwrap();
        er.set_seg_table_base_addr(GuestAddress(0x8)).unwrap();
        er.set_dequeue_pointer(GuestAddress(0x100));

        let mut trb = Trb::new();

        // Fill first table.
        trb.set_control(1);
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 1);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(2);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 2);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(3);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 3);
        assert_eq!(t.get_cycle(), true);

        // Fill second table.
        trb.set_control(4);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x200)).unwrap();
        assert_eq!(t.get_control(), 4);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(5);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 5);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(6);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 6);
        assert_eq!(t.get_cycle(), true);

        // Fill third table.
        trb.set_control(7);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x300)).unwrap();
        assert_eq!(t.get_control(), 7);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(8);
        assert!(er.add_event(trb).is_ok());
        // There is only space for one last TRB, so the ring is considered full.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 8);
        assert_eq!(t.get_cycle(), true);

        // Adding another TRB will result in an error.
        match er.add_event(trb) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue one TRB.
        er.set_dequeue_pointer(GuestAddress(0x100 + trb_size));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);

        // Fill the last TRB of the third table.
        trb.set_control(9);
        assert!(er.add_event(trb).is_ok());
        // Only space for one last TRB remains, so the ring is considered full again.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        // Verify the TRB that was just written to the last slot of the third segment.
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 9);
        assert_eq!(t.get_cycle(), true);

        // Adding another TRB will result in an error.
        match er.add_event(trb) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue until empty.
        er.set_dequeue_pointer(GuestAddress(0x100));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), true);

        // Fill first table again.
        trb.set_control(10);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 10);
        // The cycle bit should be flipped after wrapping around the whole ring.
        assert_eq!(t.get_cycle(), false);

        trb.set_control(11);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 11);
        assert_eq!(t.get_cycle(), false);

        trb.set_control(12);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 12);
        assert_eq!(t.get_cycle(), false);
    }
}