// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::mem::size_of;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;

use remain::sorted;
use thiserror::Error;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use vm_memory::GuestMemoryError;
use zerocopy::AsBytes;

use super::xhci_abi::*;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("event ring has a bad enqueue pointer: {0}")]
    BadEnqueuePointer(GuestAddress),
    #[error("event ring has a bad seg table addr: {0}")]
    BadSegTableAddress(GuestAddress),
    #[error("event ring has a bad seg table index: {0}")]
    BadSegTableIndex(u16),
    #[error("event ring is full")]
    EventRingFull,
    #[error("event ring cannot read from guest memory: {0}")]
    MemoryRead(GuestMemoryError),
    #[error("event ring cannot write to guest memory: {0}")]
    MemoryWrite(GuestMemoryError),
    #[error("event ring is uninitialized")]
    Uninitialized,
}

type Result<T> = std::result::Result<T, Error>;

/// Event rings are segmented circular buffers used to pass event TRBs from the xHCI device back to
/// the guest. Each event ring is associated with a single interrupter. See section 4.9.4 of the
/// xHCI specification for more details.
///
/// This implementation only supports the primary interrupter. Review the xHCI spec before using it
/// for a secondary interrupter.
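///
/// A minimal usage sketch (the addresses are hypothetical; the guest normally programs these
/// values through the interrupter's ERSTSZ, ERSTBA, and ERDP registers):
///
/// ```ignore
/// let mut er = EventRing::new(mem);
/// er.set_seg_table_size(1)?;                      // ERSTSZ
/// er.set_seg_table_base_addr(GuestAddress(0x8))?; // ERSTBA
/// er.set_dequeue_pointer(GuestAddress(0x100));    // ERDP
/// er.add_event(trb)?;                             // the xHC posts an event TRB
/// ```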
pub struct EventRing {
    mem: GuestMemory,
    segment_table_size: u16,
    segment_table_base_address: GuestAddress,
    current_segment_index: u16,
    trb_count: u16,
    enqueue_pointer: GuestAddress,
    dequeue_pointer: GuestAddress,
    producer_cycle_state: bool,
}

impl EventRing {
    /// Create an empty, uninitialized event ring.
    pub fn new(mem: GuestMemory) -> Self {
        EventRing {
            mem,
            segment_table_size: 0,
            segment_table_base_address: GuestAddress(0),
            current_segment_index: 0,
            enqueue_pointer: GuestAddress(0),
            dequeue_pointer: GuestAddress(0),
            trb_count: 0,
            // As specified in xHCI spec 4.9.4, cycle state should be initialized to 1.
            producer_cycle_state: true,
        }
    }

    /// This function implements the left side of xHCI spec, Figure 4-12.
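    /// In short: verify the ring is not full, write the whole TRB with a stale cycle bit, fence,
    /// then flip the cycle bit so the guest only ever observes a completely written TRB, and
    /// finally advance the enqueue pointer, moving to the next segment (and toggling the
    /// producer cycle state after the last one) when the current segment is exhausted.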
    pub fn add_event(&mut self, mut trb: Trb) -> Result<()> {
        self.check_inited()?;
        if self.is_full()? {
            return Err(Error::EventRingFull);
        }
        // The event is written twice to avoid a race condition. The guest kernel uses the cycle
        // bit to check ownership, so the cycle bit must be written last.
        trb.set_cycle(!self.producer_cycle_state);
        self.mem
            .write_obj_at_addr(trb, self.enqueue_pointer)
            .map_err(Error::MemoryWrite)?;
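        // At this point the guest sees a complete TRB whose cycle bit still marks it as not yet
        // owned by software, so it will not be consumed early.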

        // Updating the cycle state bit should always happen after updating the other parts.
        fence(Ordering::SeqCst);

        trb.set_cycle(self.producer_cycle_state);

        // Offset of the cycle state byte within a TRB.
        const CYCLE_STATE_OFFSET: usize = 12usize;
        let data = trb.as_bytes();
        // A TRB contains 4 dwords; the last one holds the cycle bit.
        let cycle_bit_dword = &data[CYCLE_STATE_OFFSET..];
        let address = self
            .enqueue_pointer
            .checked_add(CYCLE_STATE_OFFSET as u64)
            .ok_or(Error::BadEnqueuePointer(self.enqueue_pointer))?;
        self.mem
            .write_all_at_addr(cycle_bit_dword, address)
            .map_err(Error::MemoryWrite)?;

        usb_debug!(
            "event write to pointer {:#x}, trb_count {}, {}",
            self.enqueue_pointer.0,
            self.trb_count,
            trb
        );
        self.enqueue_pointer = match self.enqueue_pointer.checked_add(size_of::<Trb>() as u64) {
            Some(addr) => addr,
            None => return Err(Error::BadEnqueuePointer(self.enqueue_pointer)),
        };
        self.trb_count -= 1;
        if self.trb_count == 0 {
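            // The current segment is exhausted: move to the next ERST entry, toggling the
            // producer cycle state when wrapping past the last segment.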
            self.current_segment_index += 1;
            if self.current_segment_index == self.segment_table_size {
                self.producer_cycle_state ^= true;
                self.current_segment_index = 0;
            }
            self.load_current_seg_table_entry()?;
        }
        Ok(())
    }

    /// Set segment table size.
    pub fn set_seg_table_size(&mut self, size: u16) -> Result<()> {
        usb_debug!("event ring seg table size is set to {}", size);
        self.segment_table_size = size;
        self.try_reconfigure_event_ring()
    }

    /// Set segment table base addr.
    pub fn set_seg_table_base_addr(&mut self, addr: GuestAddress) -> Result<()> {
        usb_debug!("event ring seg table base addr is set to {:#x}", addr.0);
        self.segment_table_base_address = addr;
        self.try_reconfigure_event_ring()
    }

    /// Set dequeue pointer.
    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
        usb_debug!("event ring dequeue pointer set to {:#x}", addr.0);
        self.dequeue_pointer = addr;
    }

    /// Check if event ring is empty.
    pub fn is_empty(&self) -> bool {
        self.enqueue_pointer == self.dequeue_pointer
    }

    /// The event ring is considered full when there is space for only one last TRB. In that case,
    /// the xHC should write an error TRB and perform additional handling; see the spec, Figure
    /// 4-12, for details.
    /// For now, we just detect that the ring is full and fail (as this is unlikely to happen).
    pub fn is_full(&self) -> Result<bool> {
        if self.trb_count == 1 {
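            // Only one slot remains in the current segment. The ring is full if the dequeue
            // pointer already sits at the base of the next segment, i.e. right after that slot.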
            // erst == event ring segment table
            let next_erst_idx = (self.current_segment_index + 1) % self.segment_table_size;
            let erst_entry = self.read_seg_table_entry(next_erst_idx)?;
            Ok(self.dequeue_pointer.0 == erst_entry.get_ring_segment_base_address())
        } else {
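            // Otherwise the ring is full when the dequeue pointer is exactly one TRB ahead of
            // the enqueue pointer.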
            Ok(self.dequeue_pointer.0 == self.enqueue_pointer.0 + size_of::<Trb>() as u64)
        }
    }

    /// Try to initialize the event ring. Does nothing until both the segment table size and base
    /// address have been set; fails if the current segment table entry cannot be loaded.
    fn try_reconfigure_event_ring(&mut self) -> Result<()> {
        if self.segment_table_size == 0 || self.segment_table_base_address.0 == 0 {
            return Ok(());
        }
        self.load_current_seg_table_entry()
    }

    // Check if this event ring is initialized.
    fn check_inited(&self) -> Result<()> {
        if self.segment_table_size == 0
            || self.segment_table_base_address == GuestAddress(0)
            || self.enqueue_pointer == GuestAddress(0)
        {
            return Err(Error::Uninitialized);
        }
        Ok(())
    }

    // Load the current segment table entry.
    fn load_current_seg_table_entry(&mut self) -> Result<()> {
        let entry = self.read_seg_table_entry(self.current_segment_index)?;
        self.enqueue_pointer = GuestAddress(entry.get_ring_segment_base_address());
        self.trb_count = entry.get_ring_segment_size();
        Ok(())
    }

    // Get seg table entry at index.
    fn read_seg_table_entry(&self, index: u16) -> Result<EventRingSegmentTableEntry> {
        let seg_table_addr = self.get_seg_table_addr(index)?;
        // TODO(jkwang) We can refactor GuestMemory to allow in-place memory operation.
        self.mem
            .read_obj_from_addr(seg_table_addr)
            .map_err(Error::MemoryRead)
    }

    // Get seg table addr at index.
    fn get_seg_table_addr(&self, index: u16) -> Result<GuestAddress> {
        if index >= self.segment_table_size {
            return Err(Error::BadSegTableIndex(index));
        }
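        // ERST entries are laid out contiguously, so the entry address is the table base plus
        // index times the entry size.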
        self.segment_table_base_address
            .checked_add(((size_of::<EventRingSegmentTableEntry>() as u16) * index) as u64)
            .ok_or(Error::BadSegTableAddress(self.segment_table_base_address))
    }
}

#[cfg(test)]
mod test {
    use std::mem::size_of;

    use super::*;

    #[test]
    fn test_uninited() {
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm);
        let trb = Trb::new();
        match er.add_event(trb).err().unwrap() {
            Error::Uninitialized => {}
            _ => panic!("unexpected error"),
        }
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
    }
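
    // A small additional check (a sketch, not present in the original suite): an out-of-range
    // segment table index should be rejected with Error::BadSegTableIndex.
    #[test]
    fn test_bad_seg_table_index() {
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm);
        er.set_seg_table_size(3).unwrap();
        match er.get_seg_table_addr(100) {
            Err(Error::BadSegTableIndex(100)) => {}
            _ => panic!("expected BadSegTableIndex"),
        }
    }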

    #[test]
    fn test_event_ring() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut er = EventRing::new(gm.clone());
        let mut st_entries = [EventRingSegmentTableEntry::new(); 3];
        st_entries[0].set_ring_segment_base_address(0x100);
        st_entries[0].set_ring_segment_size(3);
        st_entries[1].set_ring_segment_base_address(0x200);
        st_entries[1].set_ring_segment_size(3);
        st_entries[2].set_ring_segment_base_address(0x300);
        st_entries[2].set_ring_segment_size(3);
        gm.write_obj_at_addr(st_entries[0], GuestAddress(0x8))
            .unwrap();
        gm.write_obj_at_addr(
            st_entries[1],
            GuestAddress(0x8 + size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        gm.write_obj_at_addr(
            st_entries[2],
            GuestAddress(0x8 + 2 * size_of::<EventRingSegmentTableEntry>() as u64),
        )
        .unwrap();
        // Init the event ring. Must be done after the segment table entries are written.
        er.set_seg_table_size(3).unwrap();
        er.set_seg_table_base_addr(GuestAddress(0x8)).unwrap();
        er.set_dequeue_pointer(GuestAddress(0x100));

        let mut trb = Trb::new();

        // Fill first table.
        trb.set_control(1);
        assert_eq!(er.is_empty(), true);
        assert_eq!(er.is_full().unwrap(), false);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 1);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(2);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 2);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(3);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 3);
        assert_eq!(t.get_cycle(), true);

        // Fill second table.
        trb.set_control(4);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x200)).unwrap();
        assert_eq!(t.get_control(), 4);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(5);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 5);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(6);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x200 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 6);
        assert_eq!(t.get_cycle(), true);

        // Fill third table.
        trb.set_control(7);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x300)).unwrap();
        assert_eq!(t.get_control(), 7);
        assert_eq!(t.get_cycle(), true);

        trb.set_control(8);
        assert!(er.add_event(trb).is_ok());
        // Only one TRB slot remains, so the ring is considered full.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 8);
        assert_eq!(t.get_cycle(), true);

        // Adding another TRB results in an error.
        match er.add_event(trb) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue one trb.
        er.set_dequeue_pointer(GuestAddress(0x100 + trb_size));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);

        // Fill the last trb of the third table.
        trb.set_control(9);
        assert!(er.add_event(trb).is_ok());
        // Only one TRB slot remains, so the ring is considered full again.
        assert_eq!(er.is_full().unwrap(), true);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x300 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 8);
        assert_eq!(t.get_cycle(), true);

        // Adding another TRB results in an error.
        match er.add_event(trb) {
            Err(Error::EventRingFull) => {}
            _ => panic!("er should be full"),
        };

        // Dequeue until empty.
        er.set_dequeue_pointer(GuestAddress(0x100));
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), true);

        // Fill first table again.
        trb.set_control(10);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
        assert_eq!(t.get_control(), 10);
        // cycle bit should be reversed.
        assert_eq!(t.get_cycle(), false);

        trb.set_control(11);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 11);
        assert_eq!(t.get_cycle(), false);

        trb.set_control(12);
        assert!(er.add_event(trb).is_ok());
        assert_eq!(er.is_full().unwrap(), false);
        assert_eq!(er.is_empty(), false);
        let t: Trb = gm
            .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
            .unwrap();
        assert_eq!(t.get_control(), 12);
        assert_eq!(t.get_cycle(), false);
    }
}