• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2023 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 use std::cmp;
6 use std::io::Read;
7 use std::io::Write;
8 
9 use base::warn;
10 use data_model::Be16;
11 use data_model::Be32;
12 use data_model::Be64;
13 use zerocopy::FromBytes;
14 use zerocopy::Immutable;
15 use zerocopy::IntoBytes;
16 use zerocopy::KnownLayout;
17 
18 use crate::virtio::scsi::constants::INQUIRY;
19 use crate::virtio::scsi::constants::MAINTENANCE_IN;
20 use crate::virtio::scsi::constants::MODE_SELECT_6;
21 use crate::virtio::scsi::constants::MODE_SENSE_6;
22 use crate::virtio::scsi::constants::READ_10;
23 use crate::virtio::scsi::constants::READ_6;
24 use crate::virtio::scsi::constants::READ_CAPACITY_10;
25 use crate::virtio::scsi::constants::READ_CAPACITY_16;
26 use crate::virtio::scsi::constants::REPORT_LUNS;
27 use crate::virtio::scsi::constants::REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS;
28 use crate::virtio::scsi::constants::SERVICE_ACTION_IN_16;
29 use crate::virtio::scsi::constants::SYNCHRONIZE_CACHE_10;
30 use crate::virtio::scsi::constants::TEST_UNIT_READY;
31 use crate::virtio::scsi::constants::TYPE_DISK;
32 use crate::virtio::scsi::constants::UNMAP;
33 use crate::virtio::scsi::constants::WRITE_10;
34 use crate::virtio::scsi::constants::WRITE_SAME_10;
35 use crate::virtio::scsi::constants::WRITE_SAME_16;
36 use crate::virtio::scsi::device::AsyncLogicalUnit;
37 use crate::virtio::scsi::device::ExecuteError;
38 use crate::virtio::Reader;
39 use crate::virtio::Writer;
40 
/// A parsed SCSI command, one variant per opcode this device emulates.
///
/// Built from a raw CDB (command descriptor block) by [`Command::new`] and
/// executed by [`Command::execute`].
#[derive(Debug, PartialEq, Eq)]
pub enum Command {
    TestUnitReady(TestUnitReady),
    Read6(Read6),
    Inquiry(Inquiry),
    ModeSelect6(ModeSelect6),
    ModeSense6(ModeSense6),
    ReadCapacity10(ReadCapacity10),
    ReadCapacity16(ReadCapacity16),
    Read10(Read10),
    Write10(Write10),
    SynchronizeCache10(SynchronizeCache10),
    WriteSame10(WriteSame10),
    Unmap(Unmap),
    WriteSame16(WriteSame16),
    ReportLuns(ReportLuns),
    ReportSupportedTMFs(ReportSupportedTMFs),
}
59 
impl Command {
    /// Parses a raw CDB into a `Command`.
    ///
    /// Byte 0 of `cdb` is the operation code. Opcodes that carry a service
    /// action (SERVICE ACTION IN(16), MAINTENANCE IN) are further dispatched
    /// on byte 1.
    ///
    /// Returns `ExecuteError::Unsupported` for opcodes/service actions crosvm
    /// does not implement, or `ExecuteError::ReadCommand` if `cdb` is shorter
    /// than the matched command's fixed layout.
    pub fn new(cdb: &[u8]) -> Result<Self, ExecuteError> {
        let op = cdb[0];
        match op {
            TEST_UNIT_READY => Ok(Self::TestUnitReady(Self::parse_command(cdb)?)),
            READ_6 => Ok(Self::Read6(Self::parse_command(cdb)?)),
            INQUIRY => Ok(Self::Inquiry(Self::parse_command(cdb)?)),
            MODE_SELECT_6 => Ok(Self::ModeSelect6(Self::parse_command(cdb)?)),
            MODE_SENSE_6 => Ok(Self::ModeSense6(Self::parse_command(cdb)?)),
            READ_CAPACITY_10 => Ok(Self::ReadCapacity10(Self::parse_command(cdb)?)),
            READ_10 => Ok(Self::Read10(Self::parse_command(cdb)?)),
            WRITE_10 => Ok(Self::Write10(Self::parse_command(cdb)?)),
            SYNCHRONIZE_CACHE_10 => Ok(Self::SynchronizeCache10(Self::parse_command(cdb)?)),
            WRITE_SAME_10 => Ok(Self::WriteSame10(Self::parse_command(cdb)?)),
            UNMAP => Ok(Self::Unmap(Self::parse_command(cdb)?)),
            WRITE_SAME_16 => Ok(Self::WriteSame16(Self::parse_command(cdb)?)),
            SERVICE_ACTION_IN_16 => Self::parse_service_action_in_16(cdb),
            REPORT_LUNS => Ok(Self::ReportLuns(Self::parse_command(cdb)?)),
            MAINTENANCE_IN => Self::parse_maintenance_in(cdb),
            _ => {
                warn!("SCSI command {:#x?} is not implemented", op);
                Err(ExecuteError::Unsupported(op))
            }
        }
    }

    /// Reads the fixed-layout command struct `T` from the head of the CDB.
    /// Trailing CDB bytes beyond `size_of::<T>()` are ignored.
    fn parse_command<T: FromBytes>(cdb: &[u8]) -> Result<T, ExecuteError> {
        let (command, _) = T::read_from_prefix(cdb).map_err(|_| ExecuteError::ReadCommand)?;
        Ok(command)
    }

    /// Dispatches a MAINTENANCE IN CDB on its service action field (byte 1).
    fn parse_maintenance_in(cdb: &[u8]) -> Result<Self, ExecuteError> {
        // Top three bits are reserved.
        let service_action = cdb[1] & 0x1f;
        match service_action {
            REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS => {
                Ok(Self::ReportSupportedTMFs(Self::parse_command(cdb)?))
            }
            _ => {
                warn!(
                    "service action {:#x?} for MAINTENANCE_IN is not implemented",
                    service_action
                );
                Err(ExecuteError::Unsupported(cdb[0]))
            }
        }
    }

    /// Dispatches a SERVICE ACTION IN(16) CDB on its service action field
    /// (byte 1). Only READ CAPACITY(16) is supported.
    fn parse_service_action_in_16(cdb: &[u8]) -> Result<Self, ExecuteError> {
        // Top three bits are reserved.
        let service_action = cdb[1] & 0x1f;
        match service_action {
            READ_CAPACITY_16 => Ok(Self::ReadCapacity16(Self::parse_command(cdb)?)),
            _ => {
                warn!(
                    "service action {:#x?} for SERVICE_ACTION_IN_16 is not implemented",
                    service_action
                );
                Err(ExecuteError::Unsupported(cdb[0]))
            }
        }
    }

    /// Executes the command against the logical unit `dev`.
    ///
    /// `reader` supplies the data-out payload (WRITE, MODE SELECT, UNMAP, ...)
    /// and `writer` receives the data-in payload (READ, INQUIRY, ...).
    pub async fn execute(
        &self,
        reader: &mut Reader,
        writer: &mut Writer,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        match self {
            Self::TestUnitReady(_) => Ok(()), // noop as the device is ready.
            Self::Read6(read6) => read6.emulate(writer, dev).await,
            Self::Inquiry(inquiry) => inquiry.emulate(writer, dev),
            Self::ModeSelect6(mode_select_6) => mode_select_6.emulate(reader, dev),
            Self::ModeSense6(mode_sense_6) => mode_sense_6.emulate(writer, dev),
            Self::ReadCapacity10(read_capacity_10) => read_capacity_10.emulate(writer, dev),
            Self::ReadCapacity16(read_capacity_16) => read_capacity_16.emulate(writer, dev),
            Self::Read10(read_10) => read_10.emulate(writer, dev).await,
            Self::Write10(write_10) => write_10.emulate(reader, dev).await,
            Self::SynchronizeCache10(synchronize_cache_10) => {
                synchronize_cache_10.emulate(dev).await
            }
            Self::WriteSame10(write_same_10) => write_same_10.emulate(reader, dev).await,
            Self::Unmap(unmap) => unmap.emulate(reader, dev).await,
            Self::WriteSame16(write_same_16) => write_same_16.emulate(reader, dev).await,
            Self::ReportLuns(report_luns) => report_luns.emulate(writer),
            Self::ReportSupportedTMFs(report_supported_tmfs) => {
                report_supported_tmfs.emulate(writer)
            }
        }
    }
}
152 
/// TEST UNIT READY command (6-byte CDB).
///
/// Handled as a no-op in `Command::execute` since the device is always ready.
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct TestUnitReady {
    opcode: u8,
    reserved: [u8; 4],
    control: u8,
}
162 
check_lba_range(max_lba: u64, sector_num: u64, sector_len: usize) -> Result<(), ExecuteError>163 fn check_lba_range(max_lba: u64, sector_num: u64, sector_len: usize) -> Result<(), ExecuteError> {
164     // Checking `sector_num + sector_len - 1 <= max_lba`, but we are being careful about overflows
165     // and underflows.
166     match sector_num.checked_add(sector_len as u64) {
167         Some(v) if v <= max_lba + 1 => Ok(()),
168         _ => Err(ExecuteError::LbaOutOfRange {
169             length: sector_len,
170             sector: sector_num,
171             max_lba,
172         }),
173     }
174 }
175 
/// Reads `xfer_blocks` blocks starting at block `lba` from the disk image and
/// writes them into `writer`.
///
/// Fails with `ExecuteError::LbaOutOfRange` if the range extends past
/// `dev.max_lba`, or `ExecuteError::ReadIo` (carrying the residual byte
/// count) if the underlying transfer fails.
async fn read_from_disk(
    writer: &mut Writer,
    dev: &AsyncLogicalUnit,
    xfer_blocks: usize,
    lba: u64,
) -> Result<(), ExecuteError> {
    check_lba_range(dev.max_lba, lba, xfer_blocks)?;
    let block_size = dev.block_size;
    let count = xfer_blocks * block_size as usize;
    let offset = lba * block_size as u64;
    // Snapshot the writer position so the residual can be computed on error.
    let before = writer.bytes_written();
    writer
        .write_all_from_at_fut(&*dev.disk_image, count, offset)
        .await
        .map_err(|desc_error| {
            // Residual: bytes that were requested but not transferred.
            let resid = count - (writer.bytes_written() - before);
            ExecuteError::ReadIo { resid, desc_error }
        })
}
195 
/// READ(6) command (6-byte CDB).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct Read6 {
    opcode: u8,
    // 21-bit logical block address; the top 3 bits of the first byte are reserved.
    lba_bytes: [u8; 3],
    // Number of blocks to transfer; 0 means 256 (see `Read6::xfer_len`).
    xfer_len_byte: u8,
    control: u8,
}
206 
207 impl Read6 {
lba(&self) -> u32208     fn lba(&self) -> u32 {
209         u32::from_be_bytes([
210             0,
211             // The top three bits are reserved.
212             self.lba_bytes[0] & 0x1f,
213             self.lba_bytes[1],
214             self.lba_bytes[2],
215         ])
216     }
217 
xfer_len(&self) -> usize218     fn xfer_len(&self) -> usize {
219         // The transfer length set to 0 means 256 blocks should be read.
220         if self.xfer_len_byte == 0 {
221             256
222         } else {
223             self.xfer_len_byte as usize
224         }
225     }
226 
emulate( &self, writer: &mut Writer, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>227     async fn emulate(
228         &self,
229         writer: &mut Writer,
230         dev: &AsyncLogicalUnit,
231     ) -> Result<(), ExecuteError> {
232         let xfer_len = self.xfer_len();
233         let lba = self.lba() as u64;
234         let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(6)", xfer_len, lba);
235         read_from_disk(writer, dev, xfer_len, lba).await
236     }
237 }
238 
/// INQUIRY command (6-byte CDB).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct Inquiry {
    opcode: u8,
    // Bit 0 is the EVPD (enable vital product data) bit.
    vpd_field: u8,
    // VPD page to return when EVPD is set; must be 0 otherwise.
    page_code: u8,
    // Big-endian allocation length: how many bytes the initiator will accept.
    alloc_len_bytes: [u8; 2],
    control: u8,
}
250 
impl Inquiry {
    /// Returns the EVPD bit: when set, a vital product data page is requested
    /// instead of standard inquiry data.
    fn vital_product_data_enabled(&self) -> bool {
        self.vpd_field & 0x1 != 0
    }

    /// Returns the allocation length: the number of bytes to write back.
    fn alloc_len(&self) -> usize {
        u16::from_be_bytes(self.alloc_len_bytes) as usize
    }

    /// Returns the requested (V)PD page code.
    fn page_code(&self) -> u8 {
        self.page_code
    }

    /// Emulates INQUIRY, writing either standard inquiry data or (when EVPD
    /// is set) a vital product data page into `writer`.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "INQUIRY");
        if self.vital_product_data_enabled() {
            return self.emulate_vital_product_data_page(writer, dev);
        }
        // PAGE CODE should be 0 when vpd bit is 0.
        if self.page_code() != 0 {
            return Err(ExecuteError::InvalidField);
        }
        let alloc_len = self.alloc_len();
        // Size the buffer so that the slice below cannot go out of bounds.
        let mut outbuf = vec![0u8; cmp::max(writer.available_bytes(), alloc_len)];
        // Peripheral
        outbuf[0] = TYPE_DISK;
        // Removable bit. We currently do not support removable SCSI devices.
        outbuf[1] = 0x0;
        // Version 0x5 indicates that the device complies to SPC-3.
        outbuf[2] = 0x5;
        // Hierarchical Support | Response Data Format
        // Support hierarchical addressing mode to assign LUNs to logical units.
        // Response Data Format should be 2.
        outbuf[3] = 0x10 | 0x2;
        // Additional Length
        outbuf[4] = {
            let buflen = outbuf.len().try_into().unwrap_or(u8::MAX);
            // We will write at least 36 bytes and this is the 5th byte.
            cmp::max(buflen, 36) - 5
        };
        // Cmdque: support full task management mode
        outbuf[7] = 0x2;
        // Vendor
        Self::fill_left_aligned_ascii(&mut outbuf[8..16], "CROSVM");
        // Product ID
        Self::fill_left_aligned_ascii(&mut outbuf[16..32], "CROSVM HARDDISK");
        // Product revision level
        Self::fill_left_aligned_ascii(&mut outbuf[32..36], "0.1");

        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    /// Builds and writes the requested vital product data (VPD) page.
    ///
    /// Supported pages: 0x00 (supported pages), 0x83 (device identification),
    /// 0xb0 (block limits), 0xb2 (logical block provisioning). Any other page
    /// code yields `ExecuteError::InvalidField`.
    fn emulate_vital_product_data_page(
        &self,
        writer: &mut Writer,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let alloc_len = self.alloc_len();
        // 4096 comfortably covers every page built below.
        let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
        // Peripheral
        outbuf[0] = TYPE_DISK;
        let page_code = self.page_code();
        outbuf[1] = page_code;
        match page_code {
            // Supported VPD Pages
            0x00 => {
                // outbuf[2] byte is reserved.
                // 0x00: Supported VPD Pages (this command)
                // 0x83: Device Identification
                // 0xb0: Block Limits
                // 0xb2: Logical Block Provisioning
                const SUPPORTED_VPD_PAGE_CODES: [u8; 4] = [0x00, 0x83, 0xb0, 0xb2];
                let page_code_len: u8 = SUPPORTED_VPD_PAGE_CODES
                    .len()
                    .try_into()
                    .expect("The number of vpd page codes cannot exceed u8::MAX");
                // Page length
                outbuf[3] = page_code_len;
                outbuf[4..4 + page_code_len as usize].copy_from_slice(&SUPPORTED_VPD_PAGE_CODES);
            }
            // Device Identification
            0x83 => {
                const DEVICE_ID: &[u8] = b"CROSVM SCSI DEVICE";
                let device_id_len: u8 = DEVICE_ID
                    .len()
                    .try_into()
                    .expect("device id should be shorter");
                // Page length: An identification descriptor will be 4 bytes followed by an id.
                outbuf[2..4].copy_from_slice(&(4 + device_id_len as u16).to_be_bytes());
                // ASCII
                outbuf[4] = 0x2;
                // ASSOCIATION | IDENTIFICATION_TYPE_FIELD
                // ASSOCIATION: device_id is associated with the addressed logical unit.
                // IDENTIFICATION_TYPE_FIELD: vendor specific
                // outbuf[5] = 0x0 | 0x0;
                // outbuf[6] byte is reserved.
                outbuf[7] = device_id_len;
                outbuf[8..8 + device_id_len as usize].copy_from_slice(DEVICE_ID);
            }
            // Block Limits
            0xb0 => {
                // Page length
                outbuf[3] = 0x3c;
                // We do not support a value of zero in the NUMBER OF LOGICAL BLOCKS field in the
                // WRITE SAME command CDBs.
                outbuf[4] = 1;
                // skip outbuf[5]: crosvm does not support the COMPARE AND WRITE command.
                // Maximum transfer length
                outbuf[8..12]
                    .copy_from_slice(&dev.max_lba.try_into().unwrap_or(u32::MAX).to_be_bytes());
                // Maximum unmap LBA count
                outbuf[20..24].fill(0xff);
                // Maximum unmap block descriptor count
                outbuf[24..28].fill(0xff);
                // Optimal unmap granularity
                outbuf[28..32].copy_from_slice(&128u32.to_be_bytes());
                // Maximum WRITE SAME length
                outbuf[36..44].copy_from_slice(&dev.max_lba.to_be_bytes());
            }
            // Logical Block Provisioning
            0xb2 => {
                // Page length
                outbuf[3] = 4;
                // skip outbuf[4]: crosvm does not support logical block provisioning threshold
                // sets.
                const UNMAP: u8 = 1 << 7;
                const WRITE_SAME_16: u8 = 1 << 6;
                const WRITE_SAME_10: u8 = 1 << 5;
                outbuf[5] = UNMAP | WRITE_SAME_10 | WRITE_SAME_16;
                // The logical unit is thin-provisioned.
                outbuf[6] = 0x02;
                // skip outbuf[7]: The logical block data represented by unmapped LBAs is vendor
                // specific
            }
            _ => {
                warn!("unsupported vpd page code: {:#x?}", page_code);
                return Err(ExecuteError::InvalidField);
            }
        };
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    /// Copies `s` into `buf` left-aligned and pads the remainder with ASCII
    /// spaces, as SCSI identification strings require.
    fn fill_left_aligned_ascii(buf: &mut [u8], s: &str) {
        debug_assert!(s.len() < buf.len());
        buf[..s.len()].copy_from_slice(s.as_bytes());
        buf[s.len()..].fill(b' ');
    }
}
403 
404 // Fill in the information of the page code and return the number of bytes written to the buffer.
fill_mode_page( page_code: u8, subpage_code: u8, page_control: PageControl, outbuf: &mut [u8], ) -> Option<u8>405 fn fill_mode_page(
406     page_code: u8,
407     subpage_code: u8,
408     page_control: PageControl,
409     outbuf: &mut [u8],
410 ) -> Option<u8> {
411     // outbuf[0]: page code
412     // outbuf[1]: page length
413     match (page_code, subpage_code) {
414         // Vendor specific.
415         (0x00, 0x00) => None,
416         // Read-Write error recovery mode page
417         (0x01, 0x00) => {
418             const LEN: u8 = 10;
419             outbuf[0] = page_code;
420             outbuf[1] = LEN;
421             if page_control != PageControl::Changable {
422                 // Automatic write reallocation enabled.
423                 outbuf[3] = 0x80;
424             }
425             Some(LEN + 2)
426         }
427         // Caching.
428         (0x08, 0x00) => {
429             const LEN: u8 = 0x12;
430             outbuf[0] = page_code;
431             outbuf[1] = LEN;
432             if page_control != PageControl::Changable {
433                 // Writeback cache enabled.
434                 outbuf[2] = 0x04;
435             }
436             Some(LEN + 2)
437         }
438         _ => None,
439     }
440 }
441 
// According to the spec, devices that implement MODE SENSE(6) shall also implement MODE SELECT(6)
// as well.
/// MODE SELECT(6) command (6-byte CDB).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct ModeSelect6 {
    opcode: u8,
    // Bit 4 is the page format (PF) bit; bit 0 is the save pages (SP) bit.
    pf_sp_field: u8,
    _reserved: [u8; 2],
    // Length in bytes of the mode parameter list that follows in the data-out buffer.
    param_list_len: u8,
    control: u8,
}
455 
impl ModeSelect6 {
    /// Returns true when the page format (PF) bit is 1 and the save pages
    /// (SP) bit is 0 — the only combination crosvm accepts.
    fn is_valid_pf_and_sp(&self) -> bool {
        // crosvm only support page format bit = 1 and saved pages bit = 0
        self.pf_sp_field & 0x11 == 0x10
    }

    /// Emulates MODE SELECT(6).
    ///
    /// crosvm does not allow any mode parameter to actually change: the
    /// parameter list from the guest is checked against the values we would
    /// report, and any mismatch yields `ExecuteError::InvalidField`.
    fn emulate(&self, reader: &mut Reader, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        // Short (8-byte) block descriptor in the mode parameter list.
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct BlockDescriptor {
            _density: u8,
            _number_of_blocks_field: [u8; 3],
            _reserved: u8,
            block_len_field: [u8; 3],
        }

        impl BlockDescriptor {
            // The block length is a 24-bit big-endian value.
            fn block_len(&self) -> u32 {
                u32::from_be_bytes([
                    0,
                    self.block_len_field[0],
                    self.block_len_field[1],
                    self.block_len_field[2],
                ])
            }
        }

        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SELECT(6)");
        if !self.is_valid_pf_and_sp() {
            return Err(ExecuteError::InvalidField);
        }
        // Values for the mode parameter header.
        let [_mode_data_len, medium_type, _dev_param, block_desc_len] =
            reader.read_obj::<[u8; 4]>().map_err(ExecuteError::Read)?;
        if medium_type != TYPE_DISK {
            return Err(ExecuteError::InvalidField);
        }
        match block_desc_len {
            0 => (),
            8 => {
                let block_desc = reader
                    .read_obj::<BlockDescriptor>()
                    .map_err(ExecuteError::Read)?;
                // crosvm currently does not support modifying the block size.
                if block_desc.block_len() != dev.block_size {
                    return Err(ExecuteError::InvalidField);
                }
            }
            // crosvm does not support 2 or more block descriptors, hence block_desc_len other than
            // 0 and 8 is considered invalid.
            _ => return Err(ExecuteError::InvalidField),
        };
        // Validate every mode page in the remainder of the parameter list.
        while reader.available_bytes() > 0 {
            Self::handle_mode_page(reader)?;
        }
        Ok(())
    }

    /// Reads one mode page (page-0 format or subpage format) from `reader` and
    /// verifies that the guest is not attempting to change any value.
    fn handle_mode_page(reader: &mut Reader) -> Result<(), ExecuteError> {
        // Header of a page-0 format mode page (no subpage).
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct Page0Header {
            page_code: u8,
            page_len: u8,
        }

        // Header of a subpage format mode page (SPF bit set).
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct SubpageHeader {
            page_code: u8,
            subpage_code: u8,
            page_len_field: [u8; 2],
        }

        // Bit 6 of the first byte is the SPF (subpage format) bit.
        let is_page0 = reader.peek_obj::<u8>().map_err(ExecuteError::Read)? & 0x40 == 0;
        let (page_code, subpage_code, page_len) = if is_page0 {
            let header = reader
                .read_obj::<Page0Header>()
                .map_err(ExecuteError::Read)?;
            (header.page_code, 0, header.page_len as u16)
        } else {
            let header = reader
                .read_obj::<SubpageHeader>()
                .map_err(ExecuteError::Read)?;
            (
                header.page_code,
                header.subpage_code,
                u16::from_be_bytes(header.page_len_field),
            )
        };
        // Reconstruct the current values of the page and compare them with
        // the values the guest sent.
        let mut outbuf = vec![0; page_len as usize];
        fill_mode_page(page_code, subpage_code, PageControl::Current, &mut outbuf);
        let mut input = vec![0; page_len as usize];
        reader.read_exact(&mut input).map_err(ExecuteError::Read)?;
        // crosvm does not allow any values to be changed.
        if input == outbuf {
            Ok(())
        } else {
            Err(ExecuteError::InvalidField)
        }
    }
}
564 
/// MODE SENSE(6) command (6-byte CDB).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct ModeSense6 {
    opcode: u8,
    // Bit 3 is the DBD (disable block descriptors) bit.
    dbd_field: u8,
    // Top two bits: page control (PC); lower six bits: page code.
    page_control_and_page_code: u8,
    subpage_code: u8,
    // Number of bytes the initiator will accept.
    alloc_len: u8,
    control: u8,
}
577 
/// The page control (PC) field of MODE SENSE(6): which version of the mode
/// page values to report.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PageControl {
    // Current values.
    Current,
    // Default values.
    Default,
    // Changeable-values mask (sic: existing identifier spelling).
    Changable,
}
584 
impl ModeSense6 {
    /// Returns the allocation length: the number of bytes to write back.
    fn alloc_len(&self) -> usize {
        self.alloc_len as usize
    }

    /// Returns the DBD bit: when set, no block descriptor is returned.
    fn disable_block_desc(&self) -> bool {
        self.dbd_field & 0x8 != 0
    }

    /// Returns the page code (lower six bits of byte 2).
    fn page_code(&self) -> u8 {
        // The top two bits represents page control field, and the rest is page code.
        self.page_control_and_page_code & 0x3f
    }

    /// Decodes the page control field (top two bits of byte 2). Saved values
    /// (PC = 3) are not supported.
    fn page_control(&self) -> Result<PageControl, ExecuteError> {
        match self.page_control_and_page_code >> 6 {
            0 => Ok(PageControl::Current),
            1 => Ok(PageControl::Changable),
            2 => Ok(PageControl::Default),
            3 => Err(ExecuteError::SavingParamNotSupported),
            // Unreachable: a u8 shifted right by 6 is always in 0..=3.
            _ => Err(ExecuteError::InvalidField),
        }
    }

    /// Returns the subpage code (byte 3).
    fn subpage_code(&self) -> u8 {
        self.subpage_code
    }

    /// Emulates MODE SENSE(6): writes the mode parameter header, an optional
    /// block descriptor, and the requested mode page(s) into `writer`.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SENSE(6)");
        let alloc_len = self.alloc_len();
        // 4096 comfortably covers all pages built below; `alloc_len` keeps the
        // final slice in bounds.
        let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
        // outbuf[0]: Represents data length. Will be filled later.
        // outbuf[1]: Medium type should be 0.

        // Device specific parameter
        // We do not support the disabled page out (DPO) and forced unit access (FUA) bit.
        outbuf[2] = if dev.read_only { 0x80 } else { 0x00 };
        // `idx` tracks the next free byte in `outbuf` (u8: mode data fits in
        // one byte for MODE SENSE(6)).
        let mut idx = if !self.disable_block_desc() && dev.max_lba > 0 {
            // Block descriptor length.
            outbuf[3] = 8;
            // outbuf[4]: Density code is 0.
            let sectors = dev.max_lba;
            // Fill in the number of sectors if not bigger than 0xffffff, leave it with 0
            // otherwise.
            if sectors <= 0xffffff {
                outbuf[5..8].copy_from_slice(&(sectors as u32).to_be_bytes()[1..]);
            }
            // outbuf[8]: reserved.
            outbuf[9..12].copy_from_slice(&dev.block_size.to_be_bytes()[1..]);
            12
        } else {
            4
        };

        let page_control = self.page_control()?;
        let page_code = self.page_code();
        let subpage_code = self.subpage_code();
        // The pair of the page code and the subpage code specifies which mode pages and subpages
        // to return. Refer to the Table 99 in the SPC-3 spec for more details:
        // <https://www.t10.org/cgi-bin/ac.pl?t=f&f=spc3r23.pdf>
        match (page_code, subpage_code) {
            // Return all mode pages with subpage 0.
            (0x3f, 0x00) => {
                Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
            }
            // Return all mode pages with subpages 0x00-0xfe.
            (0x3f, 0xff) => {
                for subpage_code in 0..0xff {
                    Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
                }
            }
            // subpage_code other than 0x00 or 0xff are reserved.
            (0x3f, _) => return Err(ExecuteError::InvalidField),
            // Return a specific mode page with subpages 0x00-0xfe.
            (_, 0xff) => {
                // NOTE(review): this errors as soon as any subpage of a valid
                // page is unsupported (only subpage 0 ever succeeds), so a
                // supported page_code with subpage 0xff always returns
                // InvalidField — confirm this is intended.
                for subpage_code in 0..0xff {
                    match fill_mode_page(
                        page_code,
                        subpage_code,
                        page_control,
                        &mut outbuf[idx as usize..],
                    ) {
                        Some(n) => idx += n,
                        None => return Err(ExecuteError::InvalidField),
                    };
                }
            }
            (_, _) => {
                match fill_mode_page(
                    page_code,
                    subpage_code,
                    page_control,
                    &mut outbuf[idx as usize..],
                ) {
                    Some(n) => idx += n,
                    None => return Err(ExecuteError::InvalidField),
                };
            }
        };
        // Mode data length: total bytes that follow the length byte itself.
        outbuf[0] = idx - 1;
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    // Fill in mode pages with a specific subpage_code.
    // `idx` is advanced past every page written; unsupported pages are
    // silently skipped (per spec, "return all" ignores unsupported pages).
    fn add_all_page_codes(
        subpage_code: u8,
        page_control: PageControl,
        outbuf: &mut [u8],
        idx: &mut u8,
    ) {
        for page_code in 1..0x3f {
            if let Some(n) = fill_mode_page(
                page_code,
                subpage_code,
                page_control,
                &mut outbuf[*idx as usize..],
            ) {
                *idx += n;
            }
        }
        // Add mode page 0 after all other mode pages were returned.
        if let Some(n) = fill_mode_page(0, subpage_code, page_control, &mut outbuf[*idx as usize..])
        {
            *idx += n;
        }
    }
}
715 
/// READ CAPACITY(10) command (10-byte CDB).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct ReadCapacity10 {
    opcode: u8,
    _obsolete1: u8,
    _obsolete2: [u8; 4],
    _reserved: [u8; 2],
    _obsolete3: u8,
    control: u8,
}
728 
729 impl ReadCapacity10 {
emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError>730     fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
731         // Returned value is the block address of the last sector.
732         // If the block address exceeds u32::MAX, we return u32::MAX.
733         let block_address: u32 = dev.max_lba.saturating_sub(1).try_into().unwrap_or(u32::MAX);
734         let mut outbuf = [0u8; 8];
735         outbuf[..4].copy_from_slice(&block_address.to_be_bytes());
736         outbuf[4..8].copy_from_slice(&dev.block_size.to_be_bytes());
737         writer.write_all(&outbuf).map_err(ExecuteError::Write)
738     }
739 }
740 
/// READ CAPACITY(16) command (16-byte CDB), reached via SERVICE ACTION IN(16).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct ReadCapacity16 {
    opcode: u8,
    // Lower five bits hold the service action (READ CAPACITY(16) = 0x10).
    service_action_field: u8,
    _obsolete: [u8; 8],
    alloc_len_bytes: [u8; 4],
    _reserved: u8,
    control: u8,
}
753 
754 impl ReadCapacity16 {
emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError>755     fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
756         let _trace = cros_tracing::trace_event!(VirtioScsi, "READ_CAPACITY(16)");
757         let mut outbuf = [0u8; 32];
758         // Last logical block address
759         outbuf[..8].copy_from_slice(&dev.max_lba.saturating_sub(1).to_be_bytes());
760         // Block size
761         outbuf[8..12].copy_from_slice(&dev.block_size.to_be_bytes());
762         // crosvm implements logical block provisioning management.
763         outbuf[14] = 1 << 7;
764         writer.write_all(&outbuf).map_err(ExecuteError::Write)
765     }
766 }
767 
/// READ(10) command descriptor block (opcode 0x28).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct Read10 {
    opcode: u8,
    // RDPROTECT/flags byte — NOTE(review): not consulted by the emulation here.
    rdprotect: u8,
    // Logical block address, 32-bit big-endian.
    lba_bytes: [u8; 4],
    group_number: u8,
    // Transfer length in logical blocks, 16-bit big-endian.
    xfer_len_bytes: [u8; 2],
    control: u8,
}
780 
781 impl Read10 {
xfer_len(&self) -> usize782     fn xfer_len(&self) -> usize {
783         u16::from_be_bytes(self.xfer_len_bytes) as usize
784     }
785 
lba(&self) -> u64786     fn lba(&self) -> u64 {
787         u32::from_be_bytes(self.lba_bytes) as u64
788     }
789 
emulate( &self, writer: &mut Writer, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>790     async fn emulate(
791         &self,
792         writer: &mut Writer,
793         dev: &AsyncLogicalUnit,
794     ) -> Result<(), ExecuteError> {
795         let xfer_len = self.xfer_len();
796         let lba = self.lba();
797         let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(10)", lba, xfer_len);
798         read_from_disk(writer, dev, xfer_len, lba).await
799     }
800 }
801 
/// WRITE(10) command descriptor block (opcode 0x2a).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct Write10 {
    opcode: u8,
    // WRPROTECT/flags byte — NOTE(review): not consulted by the emulation here.
    wrprotect: u8,
    // Logical block address, 32-bit big-endian.
    lba_bytes: [u8; 4],
    group_number: u8,
    // Transfer length in logical blocks, 16-bit big-endian.
    xfer_len_bytes: [u8; 2],
    control: u8,
}
814 
815 impl Write10 {
lba(&self) -> u64816     fn lba(&self) -> u64 {
817         u32::from_be_bytes(self.lba_bytes) as u64
818     }
819 
xfer_len(&self) -> usize820     fn xfer_len(&self) -> usize {
821         u16::from_be_bytes(self.xfer_len_bytes) as usize
822     }
823 
emulate( &self, reader: &mut Reader, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>824     async fn emulate(
825         &self,
826         reader: &mut Reader,
827         dev: &AsyncLogicalUnit,
828     ) -> Result<(), ExecuteError> {
829         let xfer_len = self.xfer_len();
830         let lba = self.lba();
831         let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE(10)", lba, xfer_len);
832         write_to_disk(reader, dev, xfer_len, lba).await
833     }
834 }
835 
write_to_disk( reader: &mut Reader, dev: &AsyncLogicalUnit, xfer_blocks: usize, lba: u64, ) -> Result<(), ExecuteError>836 async fn write_to_disk(
837     reader: &mut Reader,
838     dev: &AsyncLogicalUnit,
839     xfer_blocks: usize,
840     lba: u64,
841 ) -> Result<(), ExecuteError> {
842     if dev.read_only {
843         return Err(ExecuteError::ReadOnly);
844     }
845     check_lba_range(dev.max_lba, lba, xfer_blocks)?;
846     let block_size = dev.block_size;
847     let count = xfer_blocks * block_size as usize;
848     let offset = lba * block_size as u64;
849     let before = reader.bytes_read();
850     reader
851         .read_exact_to_at_fut(&*dev.disk_image, count, offset)
852         .await
853         .map_err(|desc_error| {
854             let resid = count - (reader.bytes_read() - before);
855             ExecuteError::WriteIo { resid, desc_error }
856         })
857 }
858 
/// SYNCHRONIZE CACHE(10) command descriptor block (opcode 0x35).
///
/// NOTE(review): emulate() flushes the whole image via fdatasync; the LBA and
/// block-count fields below are currently ignored.
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct SynchronizeCache10 {
    opcode: u8,
    immed_byte: u8,
    lba_bytes: [u8; 4],
    group_number: u8,
    block_num_bytes: [u8; 2],
    control: u8,
}
871 
872 impl SynchronizeCache10 {
emulate(&self, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError>873     async fn emulate(&self, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
874         let _trace = cros_tracing::trace_event!(VirtioScsi, "SYNCHRONIZE_CACHE(10)");
875         if dev.read_only {
876             return Err(ExecuteError::ReadOnly);
877         }
878         dev.disk_image.fdatasync().await.map_err(|e| {
879             warn!("failed to sync: {e}");
880             ExecuteError::SynchronizationError
881         })
882     }
883 }
884 
unmap(dev: &AsyncLogicalUnit, lba: u64, nblocks: u64) -> Result<(), ExecuteError>885 async fn unmap(dev: &AsyncLogicalUnit, lba: u64, nblocks: u64) -> Result<(), ExecuteError> {
886     check_lba_range(dev.max_lba, lba, nblocks as usize)?;
887     let offset = lba * dev.block_size as u64;
888     let length = nblocks * dev.block_size as u64;
889     // Ignore the errors here since the device is not strictly required to unmap the LBAs.
890     let _ = dev.disk_image.punch_hole(offset, length).await;
891     Ok(())
892 }
893 
write_same( dev: &AsyncLogicalUnit, lba: u64, nblocks: u64, reader: &mut Reader, ) -> Result<(), ExecuteError>894 async fn write_same(
895     dev: &AsyncLogicalUnit,
896     lba: u64,
897     nblocks: u64,
898     reader: &mut Reader,
899 ) -> Result<(), ExecuteError> {
900     check_lba_range(dev.max_lba, lba, nblocks as usize)?;
901     // The WRITE SAME command expects the device to transfer a single logical block from the
902     // Data-Out buffer.
903     reader.split_at(dev.block_size as usize);
904     if reader.get_remaining().iter().all(|s| s.is_all_zero()) {
905         let block_size = dev.block_size as u64;
906         // Ignore the errors here since the device is not strictly required to unmap the LBAs.
907         let _ = dev
908             .disk_image
909             .write_zeroes_at(lba * block_size, nblocks * block_size)
910             .await;
911         Ok(())
912     } else {
913         // TODO(b/309376528): If the specified data is not zero, raise error for now.
914         Err(ExecuteError::InvalidField)
915     }
916 }
917 
/// WRITE SAME(10) command descriptor block.
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct WriteSame10 {
    opcode: u8,
    // Flags byte: ANCHOR is bit 4 and UNMAP is bit 3 (see the accessors in
    // the impl block).
    wrprotect_anchor_unmap: u8,
    // Logical block address, 32-bit big-endian.
    lba_bytes: [u8; 4],
    group_number_field: u8,
    // Number of blocks, 16-bit big-endian.
    nblocks_bytes: [u8; 2],
    control: u8,
}
930 
931 impl WriteSame10 {
lba(&self) -> u32932     fn lba(&self) -> u32 {
933         u32::from_be_bytes(self.lba_bytes)
934     }
935 
nblocks(&self) -> u16936     fn nblocks(&self) -> u16 {
937         u16::from_be_bytes(self.nblocks_bytes)
938     }
939 
unmap(&self) -> bool940     fn unmap(&self) -> bool {
941         self.wrprotect_anchor_unmap & 0x8 != 0
942     }
943 
anchor(&self) -> bool944     fn anchor(&self) -> bool {
945         self.wrprotect_anchor_unmap & 0x10 != 0
946     }
947 
emulate( &self, reader: &mut Reader, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>948     async fn emulate(
949         &self,
950         reader: &mut Reader,
951         dev: &AsyncLogicalUnit,
952     ) -> Result<(), ExecuteError> {
953         let lba = self.lba() as u64;
954         let nblocks = self.nblocks() as u64;
955         let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(10)", lba, nblocks);
956         if dev.read_only {
957             return Err(ExecuteError::ReadOnly);
958         }
959         if nblocks == 0 {
960             // crosvm does not allow the number of blocks to be zero.
961             return Err(ExecuteError::InvalidField);
962         }
963         if self.anchor() {
964             // crosvm currently does not support anchor operations.
965             return Err(ExecuteError::InvalidField);
966         }
967         if self.unmap() {
968             unmap(dev, lba, nblocks).await
969         } else {
970             write_same(dev, lba, nblocks, reader).await
971         }
972     }
973 }
974 
/// UNMAP command descriptor block.
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct Unmap {
    opcode: u8,
    // Bit 0 is the ANCHOR flag (see the accessor in the impl block).
    anchor_field: u8,
    _reserved: [u8; 4],
    group_number_field: u8,
    // Length in bytes of the Data-Out parameter list, 16-bit big-endian.
    param_list_len_bytes: [u8; 2],
    control: u8,
}
987 
impl Unmap {
    /// ANCHOR bit (CDB byte 1, bit 0). crosvm rejects anchored requests.
    fn anchor(&self) -> bool {
        self.anchor_field & 0x01 != 0
    }

    /// PARAMETER LIST LENGTH (CDB bytes 7-8, big-endian): size in bytes of the
    /// Data-Out parameter list.
    fn param_list_len(&self) -> u16 {
        u16::from_be_bytes(self.param_list_len_bytes)
    }

    /// Emulates UNMAP: parses the parameter list from the Data-Out buffer and
    /// punches a hole in the disk image for each block descriptor.
    ///
    /// Errors: `InvalidField` for anchored requests, `ReadOnly` for read-only
    /// devices, `InvalidParamLen` when the list is too short to hold its own
    /// 8-byte header, and `Read` when the buffer runs out mid-parse.
    async fn emulate(
        &self,
        reader: &mut Reader,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "UNMAP");
        // Reject anchor == 1
        if self.anchor() {
            return Err(ExecuteError::InvalidField);
        }
        if dev.read_only {
            return Err(ExecuteError::ReadOnly);
        }
        let param_list_len = self.param_list_len();
        // A non-empty parameter list shorter than its 8-byte header cannot be
        // parsed.
        if 0 < param_list_len && param_list_len < 8 {
            return Err(ExecuteError::InvalidParamLen);
        }
        // unmap data len
        reader.consume(2);
        let unmap_block_descriptors = {
            let block_data_len = reader
                .read_obj::<Be16>()
                .map_err(ExecuteError::Read)?
                .to_native();
            // If the data length is not a multiple of 16, the last unmap block should be ignored.
            block_data_len / 16
        };
        // reserved
        reader.consume(4);
        // Each 16-byte descriptor: 8-byte LBA, 4-byte block count, 4 reserved
        // bytes.
        for _ in 0..unmap_block_descriptors {
            let lba = reader
                .read_obj::<Be64>()
                .map_err(ExecuteError::Read)?
                .to_native();
            let nblocks = reader
                .read_obj::<Be32>()
                .map_err(ExecuteError::Read)?
                .to_native() as u64;
            // reserved
            reader.consume(4);
            unmap(dev, lba, nblocks).await?;
        }
        Ok(())
    }
}
1042 
/// WRITE SAME(16) command descriptor block: the 64-bit-LBA / 32-bit-count
/// variant of WRITE SAME(10).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct WriteSame16 {
    opcode: u8,
    // Flags byte: ANCHOR is bit 4 and UNMAP is bit 3 (see the accessors in
    // the impl block).
    wrprotect_anchor_unmap: u8,
    // Logical block address, 64-bit big-endian.
    lba_bytes: [u8; 8],
    // Number of blocks, 32-bit big-endian.
    nblocks_bytes: [u8; 4],
    group_number_field: u8,
    control: u8,
}
1055 
1056 impl WriteSame16 {
lba(&self) -> u641057     fn lba(&self) -> u64 {
1058         u64::from_be_bytes(self.lba_bytes)
1059     }
1060 
nblocks(&self) -> u321061     fn nblocks(&self) -> u32 {
1062         u32::from_be_bytes(self.nblocks_bytes)
1063     }
1064 
unmap(&self) -> bool1065     fn unmap(&self) -> bool {
1066         self.wrprotect_anchor_unmap & 0x8 != 0
1067     }
1068 
anchor(&self) -> bool1069     fn anchor(&self) -> bool {
1070         self.wrprotect_anchor_unmap & 0x10 != 0
1071     }
1072 
emulate( &self, reader: &mut Reader, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>1073     async fn emulate(
1074         &self,
1075         reader: &mut Reader,
1076         dev: &AsyncLogicalUnit,
1077     ) -> Result<(), ExecuteError> {
1078         let lba = self.lba();
1079         let nblocks = self.nblocks() as u64;
1080         let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(16)", lba, nblocks);
1081         if nblocks == 0 {
1082             // crosvm does not allow the number of blocks to be zero.
1083             return Err(ExecuteError::InvalidField);
1084         }
1085         if self.anchor() {
1086             // crosvm currently does not support anchor operations.
1087             return Err(ExecuteError::InvalidField);
1088         }
1089         if self.unmap() {
1090             unmap(dev, lba, nblocks).await
1091         } else {
1092             write_same(dev, lba, nblocks, reader).await
1093         }
1094     }
1095 }
1096 
/// REPORT LUNS command descriptor block (opcode 0xa0).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct ReportLuns {
    opcode: u8,
    _reserved: u8,
    // SELECT REPORT — NOTE(review): ignored by emulate(), which always
    // reports only LUN0.
    select_report: u8,
    _reserved2: [u8; 3],
    // Allocation length in bytes, 32-bit big-endian.
    alloc_len_bytes: [u8; 4],
    _reserved3: u8,
    control: u8,
}
1110 
1111 impl ReportLuns {
alloc_len(&self) -> usize1112     fn alloc_len(&self) -> usize {
1113         u32::from_be_bytes(self.alloc_len_bytes) as usize
1114     }
1115 
emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError>1116     fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1117         let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_LUNS");
1118         // We need at least 16 bytes.
1119         if self.alloc_len() < 16 {
1120             return Err(ExecuteError::InvalidField);
1121         }
1122         // Each LUN takes 8 bytes and we only support LUN0.
1123         let lun_list_len = 8u32;
1124         writer
1125             .write_all(&lun_list_len.to_be_bytes())
1126             .map_err(ExecuteError::Write)?;
1127         let reserved = [0; 4];
1128         writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1129         let lun0 = 0u64;
1130         writer
1131             .write_all(&lun0.to_be_bytes())
1132             .map_err(ExecuteError::Write)
1133     }
1134 }
1135 
/// REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS command descriptor block
/// (opcode 0xa3, reached via MAINTENANCE IN).
#[derive(
    Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
)]
#[repr(C, packed)]
pub struct ReportSupportedTMFs {
    opcode: u8,
    // SERVICE ACTION field — NOTE(review): variant selection happens in the
    // dispatcher outside this chunk.
    service_action_field: u8,
    _reserved1: [u8; 4],
    // Allocation length in bytes, 32-bit big-endian.
    alloc_len_bytes: [u8; 4],
    _reserved2: u8,
    control: u8,
}
1148 
1149 impl ReportSupportedTMFs {
alloc_len(&self) -> u321150     fn alloc_len(&self) -> u32 {
1151         u32::from_be_bytes(self.alloc_len_bytes)
1152     }
1153 
emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError>1154     fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1155         let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_SUPPORTED_TMFs");
1156         // The allocation length should be at least four.
1157         if self.alloc_len() < 4 {
1158             return Err(ExecuteError::InvalidField);
1159         }
1160         // We support LOGICAL UNIT RESET and TARGET RESET.
1161         const LOGICAL_UNIT_RESET: u8 = 1 << 3;
1162         const TARGET_RESET: u8 = 1 << 1;
1163         writer
1164             .write_obj(LOGICAL_UNIT_RESET | TARGET_RESET)
1165             .map_err(ExecuteError::Write)?;
1166         // Push reserved bytes.
1167         let reserved = [0u8; 3];
1168         writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1169         Ok(())
1170     }
1171 }
1172 
#[cfg(test)]
mod tests {
    use super::*;

    // Each test feeds a raw CDB byte array to Command::new and checks both the
    // parsed variant and the big-endian field decoding.

    #[test]
    fn parse_test_unit_ready() {
        // TEST UNIT READY: opcode 0x00, all remaining bytes zero.
        let cdb = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        let command = Command::new(&cdb).unwrap();
        assert_eq!(
            command,
            Command::TestUnitReady(TestUnitReady {
                opcode: TEST_UNIT_READY,
                reserved: [0; 4],
                control: 0
            })
        );
    }

    #[test]
    fn parse_read6() {
        // READ(6): opcode 0x08. The LBA is 21 bits wide (0xab & 0x1f = 0x0b,
        // then 0xcd 0xef), and a transfer-length byte of 0 decodes as 256.
        let cdb = [0x08, 0xab, 0xcd, 0xef, 0x00, 0x00];
        let command = Command::new(&cdb).unwrap();
        let read6 = match command {
            Command::Read6(r) => r,
            _ => panic!("unexpected command type: {:?}", command),
        };
        assert_eq!(read6.xfer_len(), 256);
        assert_eq!(read6.lba(), 0x0bcdef);
    }

    #[test]
    fn parse_inquiry() {
        // INQUIRY: opcode 0x12 with the EVPD bit set in byte 1 and an
        // allocation length of 0x0040.
        let cdb = [0x12, 0x01, 0x00, 0x00, 0x40, 0x00];
        let command = Command::new(&cdb).unwrap();
        let inquiry = match command {
            Command::Inquiry(inq) => inq,
            _ => panic!("unexpected command type: {:?}", command),
        };
        assert!(inquiry.vital_product_data_enabled());
        assert_eq!(inquiry.alloc_len(), 0x0040);
        assert_eq!(inquiry.page_code(), 0x00);
    }

    #[test]
    fn parse_mode_sense_6() {
        // MODE SENSE(6): opcode 0x1a. The page code is the low 6 bits of
        // byte 2 (0xa8 & 0x3f = 0x28).
        let cdb = [0x1a, 0x00, 0xa8, 0x00, 0x04, 0x00];
        let command = Command::new(&cdb).unwrap();
        let mode_sense_6 = match command {
            Command::ModeSense6(m) => m,
            _ => panic!("unexpected command type: {:?}", command),
        };
        assert_eq!(mode_sense_6.alloc_len(), 0x04);
        assert_eq!(mode_sense_6.page_code(), 0x28);
        assert_eq!(mode_sense_6.page_control().unwrap(), PageControl::Default);
    }

    #[test]
    fn parse_read_capacity_10() {
        // READ CAPACITY(10): opcode 0x25; only the variant matters since the
        // other CDB fields are obsolete/reserved.
        let cdb = [0x25, 0x00, 0xab, 0xcd, 0xef, 0x01, 0x00, 0x00, 0x9, 0x0];
        let command = Command::new(&cdb).unwrap();
        match command {
            Command::ReadCapacity10(_) => (),
            _ => panic!("unexpected command type: {:?}", command),
        };
    }

    #[test]
    fn parse_read10() {
        // READ(10): opcode 0x28; 32-bit LBA in bytes 2-5, 16-bit transfer
        // length in bytes 7-8.
        let cdb = [0x28, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00];
        let command = Command::new(&cdb).unwrap();
        let read10 = match command {
            Command::Read10(r) => r,
            _ => panic!("unexpected command type: {:?}", command),
        };
        assert_eq!(read10.xfer_len(), 0x0008);
        assert_eq!(read10.lba(), 0x003c0000);
    }

    #[test]
    fn parse_write10() {
        // WRITE(10): opcode 0x2a; same field layout as READ(10).
        let cdb = [0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00];
        let command = Command::new(&cdb).unwrap();
        let write10 = match command {
            Command::Write10(w) => w,
            _ => panic!("unexpected command type: {:?}", command),
        };
        assert_eq!(write10.xfer_len(), 0x0008);
        assert_eq!(write10.lba(), 0x00000000);
    }

    #[test]
    fn parse_synchronize_cache_10() {
        // SYNCHRONIZE CACHE(10): opcode 0x35, all parameter bytes zero.
        let cdb = [0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        let command = Command::new(&cdb).unwrap();
        assert_eq!(
            command,
            Command::SynchronizeCache10(SynchronizeCache10 {
                opcode: SYNCHRONIZE_CACHE_10,
                immed_byte: 0,
                lba_bytes: [0x00, 0x00, 0x00, 0x00],
                group_number: 0x00,
                block_num_bytes: [0x00, 0x00],
                control: 0x00,
            })
        );
    }

    #[test]
    fn parse_report_luns() {
        // REPORT LUNS: opcode 0xa0; 32-bit allocation length in bytes 6-9.
        let cdb = [
            0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ];
        let command = Command::new(&cdb).unwrap();
        let report_luns = match command {
            Command::ReportLuns(r) => r,
            _ => panic!("unexpected command type: {:?}", command),
        };
        assert_eq!(report_luns.alloc_len(), 0xabcdef12);
    }

    #[test]
    fn parse_report_supported_tmfs() {
        // MAINTENANCE IN (0xa3) with service action 0x0d selects REPORT
        // SUPPORTED TASK MANAGEMENT FUNCTIONS; 32-bit allocation length in
        // bytes 6-9.
        let cdb = [
            0xa3, 0x0d, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ];
        let command = Command::new(&cdb).unwrap();
        let report_supported_tmfs = match command {
            Command::ReportSupportedTMFs(r) => r,
            _ => panic!("unexpected command type: {:?}", command),
        };
        assert_eq!(report_supported_tmfs.alloc_len(), 0xabcdef12);
    }
}
1306