// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fmt;
use std::fmt::Display;
use std::mem::size_of;

use remain::sorted;
use thiserror::Error;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use vm_memory::GuestMemoryError;

use super::xhci_abi::AddressedTrb;
use super::xhci_abi::Error as TrbError;
use super::xhci_abi::LinkTrb;
use super::xhci_abi::TransferDescriptor;
use super::xhci_abi::Trb;
use super::xhci_abi::TrbCast;
use super::xhci_abi::TrbType;

// Errors that can occur while walking the ring. Variants are kept in
// alphabetical order, enforced at compile time by #[sorted].
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("bad dequeue pointer: {0}")]
    BadDequeuePointer(GuestAddress),
    #[error("cannot cast trb: {0}")]
    CastTrb(TrbError),
    #[error("cannot read guest memory: {0}")]
    ReadGuestMemory(GuestMemoryError),
    #[error("cannot get trb chain bit: {0}")]
    TrbChain(TrbError),
}

type Result<T> = std::result::Result<T, Error>;

/// Ring Buffer is segmented circular buffer in guest memory containing work items
/// called transfer descriptors, each of which consists of one or more TRBs.
/// Ring buffer logic is shared between transfer ring and command ring.
/// Transfer Ring management is defined in xHCI spec 4.9.2.
pub struct RingBuffer {
    // Human-readable name, used only in debug logging and the Display impl.
    name: String,
    // Guest memory backing the ring; TRBs are read directly from it.
    mem: GuestMemory,
    // Guest physical address of the next TRB to consume.
    dequeue_pointer: GuestAddress,
    // Used to check if the ring is empty. Toggled when looping back to the beginning
    // of the buffer.
    consumer_cycle_state: bool,
}

impl Display for RingBuffer {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "RingBuffer `{}`", self.name)
    }
}

// Public interfaces for Ring buffer.
impl RingBuffer {
    /// Create a new RingBuffer.
new(name: String, mem: GuestMemory) -> Self60 pub fn new(name: String, mem: GuestMemory) -> Self { 61 RingBuffer { 62 name, 63 mem, 64 dequeue_pointer: GuestAddress(0), 65 consumer_cycle_state: false, 66 } 67 } 68 69 /// Dequeue next transfer descriptor from the transfer ring. dequeue_transfer_descriptor(&mut self) -> Result<Option<TransferDescriptor>>70 pub fn dequeue_transfer_descriptor(&mut self) -> Result<Option<TransferDescriptor>> { 71 let mut td: TransferDescriptor = TransferDescriptor::new(); 72 while let Some(addressed_trb) = self.get_current_trb()? { 73 if let Ok(TrbType::Link) = addressed_trb.trb.get_trb_type() { 74 let link_trb = addressed_trb 75 .trb 76 .cast::<LinkTrb>() 77 .map_err(Error::CastTrb)?; 78 self.dequeue_pointer = GuestAddress(link_trb.get_ring_segment_pointer()); 79 self.consumer_cycle_state = 80 self.consumer_cycle_state != link_trb.get_toggle_cycle(); 81 continue; 82 } 83 84 self.dequeue_pointer = match self.dequeue_pointer.checked_add(size_of::<Trb>() as u64) { 85 Some(addr) => addr, 86 None => { 87 return Err(Error::BadDequeuePointer(self.dequeue_pointer)); 88 } 89 }; 90 91 usb_debug!( 92 "{}: adding trb to td {}", 93 self.name.as_str(), 94 addressed_trb.trb 95 ); 96 td.push(addressed_trb); 97 if !addressed_trb.trb.get_chain_bit().map_err(Error::TrbChain)? { 98 usb_debug!("trb chain is false returning"); 99 break; 100 } 101 } 102 // A valid transfer descriptor contains at least one addressed trb and the last trb has 103 // chain bit != 0. 104 match td.last() { 105 Some(t) => { 106 if t.trb.get_chain_bit().map_err(Error::TrbChain)? { 107 return Ok(None); 108 } 109 } 110 None => return Ok(None), 111 } 112 Ok(Some(td)) 113 } 114 115 /// Set dequeue pointer of the ring buffer. 
    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
        usb_debug!("{}: set dequeue pointer {:x}", self.name.as_str(), addr.0);

        self.dequeue_pointer = addr;
    }

    /// Set consumer cycle state of the ring buffer.
    pub fn set_consumer_cycle_state(&mut self, state: bool) {
        usb_debug!("{}: set consumer cycle state {}", self.name.as_str(), state);
        self.consumer_cycle_state = state;
    }

    // Read trb pointed by dequeue pointer. Does not advance the dequeue pointer.
    // Returns Ok(None) when the TRB's cycle bit does not match our consumer
    // cycle state, i.e. the producer has not published it yet (ring empty).
    fn get_current_trb(&self) -> Result<Option<AddressedTrb>> {
        let trb: Trb = self
            .mem
            .read_obj_from_addr(self.dequeue_pointer)
            .map_err(Error::ReadGuestMemory)?;
        usb_debug!("{}: trb read from memory {:?}", self.name.as_str(), trb);
        // If cycle bit of trb does not equal consumer cycle state, the ring is empty.
        // This trb is invalid.
        if trb.get_cycle() != self.consumer_cycle_state {
            usb_debug!(
                "cycle bit does not match, self cycle {}",
                self.consumer_cycle_state
            );
            Ok(None)
        } else {
            // Pair the TRB with its guest physical address so callers can
            // report back exactly which TRB completed.
            Ok(Some(AddressedTrb {
                trb,
                gpa: self.dequeue_pointer.0,
            }))
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::usb::xhci::xhci_abi::*;

    #[test]
    fn ring_test_dequeue() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        // Structure of ring buffer:
        //  0x100  --> 0x200  --> 0x300
        //  trb 1  |   trb 3  |   trb 5
        //  trb 2  |   trb 4  |   trb 6
        //  l trb  -   l trb  -   l trb to 0x100
        // Chain bits are set so that {1,2,3,4} form one transfer descriptor
        // and {5,6} form a second one, spanning the link TRBs.
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        trb.set_data_buffer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer(4);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x200 + 1 * trb_size))
            .unwrap();

        ltrb.set_ring_segment_pointer(0x300);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x200 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer(5);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x300)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer(6);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x300 + 1 * trb_size))
            .unwrap();

        // Final link TRB wraps the ring back to the first segment.
        ltrb.set_ring_segment_pointer(0x100);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x300 + 2 * trb_size))
            .unwrap();

        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 4);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);
        assert_eq!(descriptor[1].trb.get_parameter(), 2);
        assert_eq!(descriptor[2].trb.get_parameter(), 3);
        assert_eq!(descriptor[3].trb.get_parameter(), 4);

        // Read second transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 2);
        assert_eq!(descriptor[0].trb.get_parameter(), 5);
        assert_eq!(descriptor[1].trb.get_parameter(), 6);
    }

    // Verifies that an in-progress (still-chained) descriptor is not returned
    // when the ring appears empty after a cycle-toggling link TRB.
    #[test]
    fn transfer_ring_test_dequeue_failure() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        trb.set_data_buffer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Link TRB with toggle_cycle set: crossing it flips the consumer
        // cycle state, so the TRB at 0x200 (written with the old cycle bit)
        // will look unpublished to the consumer.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        ltrb.set_toggle_cycle(true);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor. The chain from trb 1/2 is never
        // terminated before the ring goes empty, so no descriptor is returned.
        let descriptor = transfer_ring.dequeue_transfer_descriptor().unwrap();
        assert_eq!(descriptor.is_none(), true);
    }

    // Exercises a single-TRB ring that wraps through a toggle_cycle link TRB,
    // checking that the consumer cycle state flips on each pass.
    #[test]
    fn ring_test_toggle_cycle() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(1);
        trb.set_chain(false);
        trb.set_cycle(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(false);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Initial state: consumer cycle = false
        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);

        // Cycle bit should be unchanged since we haven't advanced past the Link TRB yet.
        assert_eq!(transfer_ring.consumer_cycle_state, false);

        // Overwrite the first TRB with a new one (data = 2)
        // with the new producer cycle bit state (true).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(2);
        trb.set_cycle(true); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 2);

        // Crossing the toggle_cycle link TRB flipped the consumer cycle state.
        assert_eq!(transfer_ring.consumer_cycle_state, true);

        // Update the Link TRB with the new cycle bit.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(true); // Producer cycle state is now 1.
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Overwrite the first TRB again with a new one (data = 3)
        // with the new producer cycle bit state (false).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(3);
        trb.set_cycle(false); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 3);

        // Second pass through the link TRB toggles the cycle state back.
        assert_eq!(transfer_ring.consumer_cycle_state, false);
    }
}