// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fmt;
use std::fmt::Display;

use bit_field::Error as BitFieldError;
use bit_field::*;
use remain::sorted;
use thiserror::Error;
use vm_memory::GuestAddress;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("cannot cast trb from raw memory")]
    CannotCastTrb,
    #[error("unknown trb type value: {0}")]
    UnknownTrbType(BitFieldError),
}

type Result<T> = std::result::Result<T, Error>;

// Fixed size of all TRB types.
const TRB_SIZE: usize = 16;

// Size of an event ring segment table entry.
const SEGMENT_TABLE_SIZE: usize = 16;

/// All TRB types.
#[bitfield]
#[bits = 6]
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum TrbType {
    Reserved = 0,
    Normal = 1,
    SetupStage = 2,
    DataStage = 3,
    StatusStage = 4,
    Isoch = 5,
    Link = 6,
    EventData = 7,
    Noop = 8,
    EnableSlotCommand = 9,
    DisableSlotCommand = 10,
    AddressDeviceCommand = 11,
    ConfigureEndpointCommand = 12,
    EvaluateContextCommand = 13,
    ResetEndpointCommand = 14,
    StopEndpointCommand = 15,
    SetTRDequeuePointerCommand = 16,
    ResetDeviceCommand = 17,
    NoopCommand = 23,
    TransferEvent = 32,
    CommandCompletionEvent = 33,
    PortStatusChangeEvent = 34,
}

/// Completion codes reported by event TRBs.
#[bitfield]
#[bits = 8]
#[derive(PartialEq, Eq, Debug)]
pub enum TrbCompletionCode {
    Success = 1,
    TransactionError = 4,
    TrbError = 5,
    StallError = 6,
    NoSlotsAvailableError = 9,
    SlotNotEnabledError = 11,
    ShortPacket = 13,
    ContextStateError = 19,
}

/// State of a device slot.
#[bitfield]
#[bits = 5]
#[derive(PartialEq, Eq, Debug)]
pub enum DeviceSlotState {
    // The same value (0) is used for both the enabled and disabled states. See
    // xhci spec table 60.
    DisabledOrEnabled = 0,
    Default = 1,
    Addressed = 2,
    Configured = 3,
}

/// State of an endpoint.
#[bitfield]
#[bits = 3]
#[derive(PartialEq, Eq, Debug)]
pub enum EndpointState {
    Disabled = 0,
    Running = 1,
    Halted = 2,
    Stopped = 3,
    Error = 4,
}

// A dequeue pointer stores bits 63:4 of a 16-byte-aligned guest physical address.
#[bitfield]
#[bits = 60]
#[derive(PartialEq, Eq, Debug)]
pub struct DequeuePtr(u64);

impl DequeuePtr {
    pub fn new(addr: GuestAddress) -> Self {
        DequeuePtr(addr.0 >> 4)
    }

    // Get the guest physical address.
    pub fn get_gpa(&self) -> GuestAddress {
        GuestAddress(self.0 << 4)
    }
}
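
// Illustrative sketch, not part of the original file: DequeuePtr::new() drops the
// low four bits of a guest address and get_gpa() reconstructs them as zero, so
// 16-byte-aligned addresses round-trip. Assumes vm_memory::GuestAddress is
// constructible from a raw u64 and comparable with PartialEq.
#[cfg(test)]
mod dequeue_ptr_example {
    use super::*;

    #[test]
    fn aligned_address_round_trips() {
        let ptr = DequeuePtr::new(GuestAddress(0x1230));
        assert_eq!(ptr.get_gpa(), GuestAddress(0x1230));
    }
}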

// Generic TRB struct containing only fields common to all types.
#[bitfield]
#[derive(Clone, Copy, PartialEq, Eq, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct Trb {
    parameter: B64,
    status: B32,
    cycle: bool,
    flags: B9,
    trb_type: TrbType,
    control: B16,
}

impl Trb {
    fn fmt_helper(&self, f: &mut fmt::Formatter) -> Result<fmt::Result> {
        match self.get_trb_type().map_err(Error::UnknownTrbType)? {
            TrbType::Reserved => Ok(write!(f, "reserved trb type")),
            TrbType::Normal => {
                let t = self.cast::<NormalTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::SetupStage => {
                let t = self.cast::<SetupStageTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::DataStage => {
                let t = self.cast::<DataStageTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::StatusStage => {
                let t = self.cast::<StatusStageTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::Isoch => {
                let t = self.cast::<IsochTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::Link => {
                let t = self.cast::<LinkTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::EventData => {
                let t = self.cast::<EventDataTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::Noop => {
                let t = self.cast::<NoopTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::EnableSlotCommand => Ok(write!(f, "trb: enable slot command {:?}", self)),
            TrbType::DisableSlotCommand => {
                let t = self.cast::<DisableSlotCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::AddressDeviceCommand => {
                let t = self.cast::<AddressDeviceCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::ConfigureEndpointCommand => {
                let t = self.cast::<ConfigureEndpointCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::EvaluateContextCommand => {
                let t = self.cast::<EvaluateContextCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::ResetEndpointCommand => {
                let t = self.cast::<ResetEndpointCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::StopEndpointCommand => {
                let t = self.cast::<StopEndpointCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::SetTRDequeuePointerCommand => {
                let t = self.cast::<SetTRDequeuePointerCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::ResetDeviceCommand => {
                let t = self.cast::<ResetDeviceCommandTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::NoopCommand => Ok(write!(f, "trb: noop command {:?}", self)),
            TrbType::TransferEvent => {
                let t = self.cast::<TransferEventTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::CommandCompletionEvent => {
                let t = self.cast::<CommandCompletionEventTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
            TrbType::PortStatusChangeEvent => {
                let t = self.cast::<PortStatusChangeEventTrb>()?;
                Ok(write!(f, "trb: {:?}", t))
            }
        }
    }
}

impl Display for Trb {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.fmt_helper(f) {
            Ok(result) => result,
            Err(e) => write!(f, "failed to format trb: {}", e),
        }
    }
}

impl Trb {
    /// Get the chain bit.
    pub fn get_chain_bit(&self) -> Result<bool> {
        Ok(match self.get_trb_type() {
            Ok(TrbType::Normal) => self.cast::<NormalTrb>()?.get_chain(),
            Ok(TrbType::DataStage) => self.cast::<DataStageTrb>()?.get_chain(),
            Ok(TrbType::StatusStage) => self.cast::<StatusStageTrb>()?.get_chain(),
            Ok(TrbType::Isoch) => self.cast::<IsochTrb>()?.get_chain(),
            Ok(TrbType::Noop) => self.cast::<NoopTrb>()?.get_chain(),
            Ok(TrbType::Link) => self.cast::<LinkTrb>()?.get_chain(),
            Ok(TrbType::EventData) => self.cast::<EventDataTrb>()?.get_chain(),
            _ => false,
        })
    }

    /// Get the interrupter target.
    pub fn interrupter_target(&self) -> u8 {
        const STATUS_INTERRUPTER_TARGET_OFFSET: u8 = 22;
        (self.get_status() >> STATUS_INTERRUPTER_TARGET_OFFSET) as u8
    }

    /// Only some TRB types may appear in a transfer ring.
    pub fn can_be_in_transfer_ring(&self) -> Result<bool> {
        match self.get_trb_type().map_err(Error::UnknownTrbType)? {
            TrbType::Normal
            | TrbType::SetupStage
            | TrbType::DataStage
            | TrbType::StatusStage
            | TrbType::Isoch
            | TrbType::Link
            | TrbType::EventData
            | TrbType::Noop => Ok(true),
            _ => Ok(false),
        }
    }

    /// Get the length of this transfer.
    pub fn transfer_length(&self) -> Result<u32> {
        const STATUS_TRANSFER_LENGTH_MASK: u32 = 0x1ffff;
        match self.get_trb_type().map_err(Error::UnknownTrbType)? {
            TrbType::Normal | TrbType::SetupStage | TrbType::DataStage | TrbType::Isoch => {
                Ok(self.get_status() & STATUS_TRANSFER_LENGTH_MASK)
            }
            _ => Ok(0),
        }
    }

    /// Returns true if an interrupt is required on completion.
    pub fn interrupt_on_completion(&self) -> bool {
        const FLAGS_INTERRUPT_ON_COMPLETION_MASK: u16 = 0x10;
        (self.get_flags() & FLAGS_INTERRUPT_ON_COMPLETION_MASK) > 0
    }

    /// Returns true if an interrupt is required when a short packet is transferred.
    pub fn interrupt_on_short_packet(&self) -> bool {
        const FLAGS_INTERRUPT_ON_SHORT_PACKET: u16 = 0x2;
        (self.get_flags() & FLAGS_INTERRUPT_ON_SHORT_PACKET) > 0
    }

    /// Returns true if this TRB carries immediate data.
    pub fn immediate_data(&self) -> Result<bool> {
        const FLAGS_IMMEDIATE_DATA_MASK: u16 = 0x20;
        match self.get_trb_type().map_err(Error::UnknownTrbType)? {
            TrbType::Normal | TrbType::SetupStage | TrbType::DataStage | TrbType::Isoch => {
                Ok((self.get_flags() & FLAGS_IMMEDIATE_DATA_MASK) != 0)
            }
            _ => Ok(false),
        }
    }
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct NormalTrb {
    data_buffer: B64,
    trb_transfer_length: B17,
    td_size: B5,
    interrupter_target: B10,
    cycle: bool,
    evaluate_next_trb: B1,
    interrupt_on_short_packet: B1,
    no_snoop: B1,
    chain: bool,
    interrupt_on_completion: B1,
    immediate_data: B1,
    reserved: B2,
    block_event_interrupt: B1,
    trb_type: TrbType,
    reserved1: B16,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct SetupStageTrb {
    request_type: B8,
    request: B8,
    value: B16,
    index: B16,
    length: B16,
    trb_transfer_length: B17,
    reserved0: B5,
    interrupter_target: B10,
    cycle: bool,
    reserved1: B4,
    interrupt_on_completion: B1,
    immediate_data: B1,
    reserved2: B3,
    trb_type: TrbType,
    transfer_type: B2,
    reserved3: B14,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct DataStageTrb {
    data_buffer_pointer: B64,
    trb_transfer_length: B17,
    td_size: B5,
    interrupter_target: B10,
    cycle: bool,
    evaluate_next_trb: B1,
    interrupt_on_short_packet: B1,
    no_snoop: B1,
    chain: bool,
    interrupt_on_completion: B1,
    immediate_data: B1,
    reserved0: B3,
    trb_type: TrbType,
    direction: B1,
    reserved1: B15,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct StatusStageTrb {
    reserved0: B64,
    reserved1: B22,
    interrupter_target: B10,
    cycle: bool,
    evaluate_next_trb: B1,
    reserved2: B2,
    chain: bool,
    interrupt_on_completion: B1,
    reserved3: B4,
    trb_type: TrbType,
    direction: B1,
    reserved4: B15,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct IsochTrb {
    data_buffer_pointer: B64,
    trb_transfer_length: B17,
    td_size: B5,
    interrupter_target: B10,
    cycle: bool,
    evaluate_next_trb: B1,
    interrupt_on_short_packet: B1,
    no_snoop: B1,
    chain: bool,
    interrupt_on_completion: B1,
    immediate_data: B1,
    transfer_burst_count: B2,
    block_event_interrupt: B1,
    trb_type: TrbType,
    tlbpc: B4,
    frame_id: B11,
    sia: B1,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct LinkTrb {
    ring_segment_pointer: B64,
    reserved0: B22,
    interrupter_target: B10,
    cycle: bool,
    toggle_cycle: bool,
    reserved1: B2,
    chain: bool,
    interrupt_on_completion: bool,
    reserved2: B4,
    trb_type: TrbType,
    reserved3: B16,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct EventDataTrb {
    event_data: B64,
    reserved0: B22,
    interrupter_target: B10,
    cycle: bool,
    evaluate_next_trb: B1,
    reserved1: B2,
    chain: bool,
    interrupt_on_completion: B1,
    reserved2: B3,
    block_event_interrupt: B1,
    trb_type: TrbType,
    reserved3: B16,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct NoopTrb {
    reserved0: B64,
    reserved1: B22,
    interrupter_target: B10,
    cycle: bool,
    evaluate_next_trb: B1,
    reserved2: B2,
    chain: bool,
    interrupt_on_completion: B1,
    reserved3: B4,
    trb_type: TrbType,
    reserved4: B16,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct DisableSlotCommandTrb {
    reserved0: B32,
    reserved1: B32,
    reserved2: B32,
    cycle: bool,
    reserved3: B9,
    trb_type: TrbType,
    reserved4: B8,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct AddressDeviceCommandTrb {
    input_context_pointer: B64,
    reserved: B32,
    cycle: bool,
    reserved2: B8,
    block_set_address_request: bool,
    trb_type: TrbType,
    reserved3: B8,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct ConfigureEndpointCommandTrb {
    input_context_pointer: B64,
    reserved0: B32,
    cycle: bool,
    reserved1: B8,
    deconfigure: bool,
    trb_type: TrbType,
    reserved2: B8,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct EvaluateContextCommandTrb {
    input_context_pointer: B64,
    reserved0: B32,
    cycle: bool,
    reserved1: B9,
    trb_type: TrbType,
    reserved2: B8,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct ResetEndpointCommandTrb {
    reserved0: B32,
    reserved1: B32,
    reserved2: B32,
    cycle: bool,
    reserved3: B8,
    transfer_state_preserve: B1,
    trb_type: TrbType,
    endpoint_id: B5,
    reserved4: B3,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct StopEndpointCommandTrb {
    reserved0: B32,
    reserved1: B32,
    reserved2: B32,
    cycle: bool,
    reserved3: B9,
    trb_type: TrbType,
    endpoint_id: B5,
    reserved4: B2,
    suspend: B1,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct SetTRDequeuePointerCommandTrb {
    dequeue_cycle_state: bool,
    stream_context_type: B3,
    dequeue_ptr: DequeuePtr,
    reserved0: B16,
    stream_id: B16,
    cycle: bool,
    reserved1: B9,
    trb_type: TrbType,
    endpoint_id: B5,
    reserved3: B2,
    suspend: B1,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct ResetDeviceCommandTrb {
    reserved0: B32,
    reserved1: B32,
    reserved2: B32,
    cycle: bool,
    reserved3: B9,
    trb_type: TrbType,
    reserved4: B8,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct TransferEventTrb {
    trb_pointer: B64,
    trb_transfer_length: B24,
    completion_code: TrbCompletionCode,
    cycle: bool,
    reserved0: B1,
    event_data: B1,
    reserved1: B7,
    trb_type: TrbType,
    endpoint_id: B5,
    reserved2: B3,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct CommandCompletionEventTrb {
    trb_pointer: B64,
    command_completion_parameter: B24,
    completion_code: TrbCompletionCode,
    cycle: bool,
    reserved: B9,
    trb_type: TrbType,
    vf_id: B8,
    slot_id: B8,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct PortStatusChangeEventTrb {
    reserved0: B24,
    port_id: B8,
    reserved1: B32,
    reserved2: B24,
    completion_code: TrbCompletionCode,
    cycle: bool,
    reserved3: B9,
    trb_type: TrbType,
    reserved4: B16,
}

/// Associates a TRB struct with its corresponding TrbType value.
pub trait TypedTrb {
    const TY: TrbType;
}

impl TypedTrb for Trb {
    const TY: TrbType = TrbType::Reserved;
}

impl TypedTrb for NormalTrb {
    const TY: TrbType = TrbType::Normal;
}

impl TypedTrb for SetupStageTrb {
    const TY: TrbType = TrbType::SetupStage;
}

impl TypedTrb for DataStageTrb {
    const TY: TrbType = TrbType::DataStage;
}

impl TypedTrb for StatusStageTrb {
    const TY: TrbType = TrbType::StatusStage;
}

impl TypedTrb for IsochTrb {
    const TY: TrbType = TrbType::Isoch;
}

impl TypedTrb for LinkTrb {
    const TY: TrbType = TrbType::Link;
}

impl TypedTrb for EventDataTrb {
    const TY: TrbType = TrbType::EventData;
}

impl TypedTrb for NoopTrb {
    const TY: TrbType = TrbType::Noop;
}

impl TypedTrb for DisableSlotCommandTrb {
    const TY: TrbType = TrbType::DisableSlotCommand;
}

impl TypedTrb for AddressDeviceCommandTrb {
    const TY: TrbType = TrbType::AddressDeviceCommand;
}

impl TypedTrb for ConfigureEndpointCommandTrb {
    const TY: TrbType = TrbType::ConfigureEndpointCommand;
}

impl TypedTrb for EvaluateContextCommandTrb {
    const TY: TrbType = TrbType::EvaluateContextCommand;
}

impl TypedTrb for ResetEndpointCommandTrb {
    const TY: TrbType = TrbType::ResetEndpointCommand;
}

impl TypedTrb for StopEndpointCommandTrb {
    const TY: TrbType = TrbType::StopEndpointCommand;
}

impl TypedTrb for SetTRDequeuePointerCommandTrb {
    const TY: TrbType = TrbType::SetTRDequeuePointerCommand;
}

impl TypedTrb for ResetDeviceCommandTrb {
    const TY: TrbType = TrbType::ResetDeviceCommand;
}

impl TypedTrb for TransferEventTrb {
    const TY: TrbType = TrbType::TransferEvent;
}

impl TypedTrb for CommandCompletionEventTrb {
    const TY: TrbType = TrbType::CommandCompletionEvent;
}

impl TypedTrb for PortStatusChangeEventTrb {
    const TY: TrbType = TrbType::PortStatusChangeEvent;
}

/// # Safety
///
/// All TRB structs have the same size, so one TRB can be safely cast to another,
/// though the resulting field values might be invalid.
pub unsafe trait TrbCast:
    FromBytes + Immutable + IntoBytes + KnownLayout + TypedTrb
{
    fn cast<T: TrbCast>(&self) -> Result<&T> {
        zerocopy::Ref::<_, T>::from_bytes(self.as_bytes())
            .map_err(|_| Error::CannotCastTrb)
            .map(zerocopy::Ref::into_ref)
    }

    fn cast_mut<T: TrbCast>(&mut self) -> Result<&mut T> {
        zerocopy::Ref::<_, T>::from_bytes(self.as_mut_bytes())
            .map_err(|_| Error::CannotCastTrb)
            .map(zerocopy::Ref::into_mut)
    }

    fn checked_cast<T: TrbCast>(&self) -> Result<&T> {
        if self
            .cast::<Trb>()?
            .get_trb_type()
            .map_err(Error::UnknownTrbType)?
            != T::TY
        {
            return Err(Error::CannotCastTrb);
        }
        self.cast::<T>()
    }

    fn checked_mut_cast<T: TrbCast>(&mut self) -> Result<&mut T> {
        if self
            .cast::<Trb>()?
            .get_trb_type()
            .map_err(Error::UnknownTrbType)?
            != T::TY
        {
            return Err(Error::CannotCastTrb);
        }
        self.cast_mut::<T>()
    }
}
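
// Minimal usage sketch, not part of the original file: tag a generic Trb as a
// Normal TRB and reinterpret it via checked_cast(), which compares the trb_type
// field against TypedTrb::TY before casting. Assumes the bit_field-generated
// Trb::new() constructor and set_* accessors.
#[cfg(test)]
mod trb_cast_example {
    use super::*;

    #[test]
    fn checked_cast_matches_trb_type() {
        let mut trb = Trb::new();
        trb.set_trb_type(TrbType::Normal);
        // The type field matches NormalTrb, so the checked cast succeeds.
        assert!(trb.checked_cast::<NormalTrb>().is_ok());
        // Casting to a type that does not match the trb_type field fails.
        assert!(trb.checked_cast::<LinkTrb>().is_err());
    }
}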

// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for Trb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for NormalTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for SetupStageTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for DataStageTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for StatusStageTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for IsochTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for LinkTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for EventDataTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for NoopTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for DisableSlotCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for AddressDeviceCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for ConfigureEndpointCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for EvaluateContextCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for ResetEndpointCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for StopEndpointCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for SetTRDequeuePointerCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for ResetDeviceCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for TransferEventTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for CommandCompletionEventTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for PortStatusChangeEventTrb {}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct EventRingSegmentTableEntry {
    ring_segment_base_address: B64,
    ring_segment_size: B16,
    reserved2: B48,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct InputControlContext {
    // Xhci spec 6.2.5.1.
    drop_context_flags: B32,
    add_context_flags: B32,
    reserved0: B32,
    reserved1: B32,
    reserved2: B32,
    reserved3: B32,
    reserved4: B32,
    configuration_value: B8,
    interface_number: B8,
    alternate_setting: B8,
    reserved5: B8,
}

impl InputControlContext {
    /// Get drop context flag.
    pub fn drop_context_flag(&self, idx: u8) -> bool {
        (self.get_drop_context_flags() & (1 << idx)) != 0
    }

    /// Get add context flag.
    pub fn add_context_flag(&self, idx: u8) -> bool {
        (self.get_add_context_flags() & (1 << idx)) != 0
    }
}
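
// Illustrative sketch, not part of the original file: the add/drop context
// flags are plain bitmasks indexed by context id. Assumes the bit_field-
// generated InputControlContext::new() and set_add_context_flags() accessors.
#[cfg(test)]
mod input_control_context_example {
    use super::*;

    #[test]
    fn add_context_flag_checks_single_bits() {
        let mut ctx = InputControlContext::new();
        ctx.set_add_context_flags(0b101);
        assert!(ctx.add_context_flag(0));
        assert!(!ctx.add_context_flag(1));
        assert!(ctx.add_context_flag(2));
    }
}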

// Size of device context entries (SlotContext and EndpointContext).
pub const DEVICE_CONTEXT_ENTRY_SIZE: usize = 32usize;

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct SlotContext {
    route_string: B20,
    speed: B4,
    reserved1: B1,
    mtt: B1,
    hub: B1,
    context_entries: B5,
    max_exit_latency: B16,
    root_hub_port_number: B8,
    num_ports: B8,
    tt_hub_slot_id: B8,
    tt_port_number: B8,
    tt_think_time: B2,
    reserved2: B4,
    interrupter_target: B10,
    usb_device_address: B8,
    reserved3: B19,
    slot_state: DeviceSlotState,
    reserved4: B32,
    reserved5: B32,
    reserved6: B32,
    reserved7: B32,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct EndpointContext {
    endpoint_state: EndpointState,
    reserved1: B5,
    mult: B2,
    max_primary_streams: B5,
    linear_stream_array: B1,
    interval: B8,
    max_esit_payload_hi: B8,
    reserved2: B1,
    error_count: B2,
    endpoint_type: B3,
    reserved3: B1,
    host_initiate_disable: B1,
    max_burst_size: B8,
    max_packet_size: B16,
    dequeue_cycle_state: bool,
    reserved4: B3,
    tr_dequeue_pointer: DequeuePtr,
    average_trb_length: B16,
    max_esit_payload_lo: B16,
    reserved5: B32,
    reserved6: B32,
    reserved7: B32,
}

#[bitfield]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct StreamContext {
    dequeue_cycle_state: bool,
    stream_context_type: B3,
    tr_dequeue_pointer: DequeuePtr,
    stopped_edtla: B24,
    reserved1: B8,
    reserved2: B32,
}

#[repr(C)]
#[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct StreamContextArray {
    pub stream_contexts: [StreamContext; 16],
}

/// Device context.
#[repr(C)]
#[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct DeviceContext {
    pub slot_context: SlotContext,
    pub endpoint_context: [EndpointContext; 31],
}

/// POD struct that associates a TRB with its address in guest memory. This is
/// useful because transfer and command completion event TRBs must contain
/// pointers to the original TRB that generated the event.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AddressedTrb {
    pub trb: Trb,
    pub gpa: u64,
}

pub type TransferDescriptor = Vec<AddressedTrb>;

#[cfg(test)]
mod tests {
    use super::*;
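
    // Illustrative sketch, not part of the original tests: exercises the
    // status/flags helpers on a generic Trb. Assumes the bit_field-generated
    // Trb::new() constructor and set_* accessors.
    #[test]
    fn trb_status_and_flag_helpers() {
        let mut trb = Trb::new();
        // Interrupter target lives in bits 31:22 of the status field.
        trb.set_status(3 << 22);
        assert_eq!(trb.interrupter_target(), 3);
        // Bit 0x10 of the flags field is interrupt-on-completion.
        trb.set_flags(0x10);
        assert!(trb.interrupt_on_completion());
        assert!(!trb.interrupt_on_short_packet());
    }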

    #[test]
    fn check_struct_sizes() {
        assert_eq!(std::mem::size_of::<Trb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<NormalTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<SetupStageTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<DataStageTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<StatusStageTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<IsochTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<LinkTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<EventDataTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<NoopTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<DisableSlotCommandTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<AddressDeviceCommandTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<ConfigureEndpointCommandTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<EvaluateContextCommandTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<ResetEndpointCommandTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<StopEndpointCommandTrb>(), TRB_SIZE);
        assert_eq!(
            std::mem::size_of::<SetTRDequeuePointerCommandTrb>(),
            TRB_SIZE
        );
        assert_eq!(std::mem::size_of::<ResetDeviceCommandTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<TransferEventTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<CommandCompletionEventTrb>(), TRB_SIZE);
        assert_eq!(std::mem::size_of::<PortStatusChangeEventTrb>(), TRB_SIZE);

        assert_eq!(
            std::mem::size_of::<EventRingSegmentTableEntry>(),
            SEGMENT_TABLE_SIZE
        );
        assert_eq!(std::mem::size_of::<InputControlContext>(), 32);
        assert_eq!(
            std::mem::size_of::<SlotContext>(),
            DEVICE_CONTEXT_ENTRY_SIZE
        );
        assert_eq!(
            std::mem::size_of::<EndpointContext>(),
            DEVICE_CONTEXT_ENTRY_SIZE
        );
        assert_eq!(
            std::mem::size_of::<DeviceContext>(),
            32 * DEVICE_CONTEXT_ENTRY_SIZE
        );
    }
}