// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// https://android.googlesource.com/platform/system/core/+/7b444f0/libsparse/sparse_format.h
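//
// On disk, a sparse image is a single `SparseHeader` followed by
// `total_chunks` chunks. Each chunk is a `ChunkHeader` followed by its
// payload: `chunk_sz * blk_sz` bytes of data for a raw chunk, a 4-byte fill
// pattern for a fill chunk, a 4-byte checksum for a CRC32 chunk, and nothing
// for a "don't care" chunk.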

use std::collections::BTreeMap;
use std::fs::File;
use std::io;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::mem;
use std::sync::Arc;

use async_trait::async_trait;
use base::AsRawDescriptor;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::RawDescriptor;
use base::VolatileSlice;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSource;
use data_model::Le16;
use data_model::Le32;
use remain::sorted;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

use crate::AsyncDisk;
use crate::DiskFile;
use crate::DiskGetLen;
use crate::Error as DiskError;
use crate::Result as DiskResult;
use crate::ToAsyncDisk;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid magic header for android sparse format")]
    InvalidMagicHeader,
    #[error("invalid specification: \"{0}\"")]
    InvalidSpecification(String),
    #[error("failed to read specification: \"{0}\"")]
    ReadSpecificationError(io::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

pub const SPARSE_HEADER_MAGIC: u32 = 0xed26ff3a;
const MAJOR_VERSION: u16 = 1;

#[repr(C)]
#[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct SparseHeader {
    magic: Le32,          // SPARSE_HEADER_MAGIC
    major_version: Le16,  // (0x1) - reject images with higher major versions
    minor_version: Le16,  // (0x0) - allow images with higher minor versions
    file_hdr_sz: Le16,    // 28 bytes for first revision of the file format
    chunk_hdr_size: Le16, // 12 bytes for first revision of the file format
    blk_sz: Le32,         // block size in bytes, must be a multiple of 4 (4096)
    total_blks: Le32,     // total blocks in the non-sparse output image
    total_chunks: Le32,   // total chunks in the sparse input image
    // CRC32 checksum of the original data, counting "don't care" as 0. Standard 802.3 polynomial,
    // use a Public Domain table implementation
    image_checksum: Le32,
}

const CHUNK_TYPE_RAW: u16 = 0xCAC1;
const CHUNK_TYPE_FILL: u16 = 0xCAC2;
const CHUNK_TYPE_DONT_CARE: u16 = 0xCAC3;
const CHUNK_TYPE_CRC32: u16 = 0xCAC4;

#[repr(C)]
#[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct ChunkHeader {
    chunk_type: Le16, /* 0xCAC1 -> raw; 0xCAC2 -> fill; 0xCAC3 -> don't care */
    reserved1: u16,
    chunk_sz: Le32, /* in blocks in output image */
    total_sz: Le32, /* in bytes of chunk input file including chunk header and data */
}

#[derive(Clone, Debug, PartialEq, Eq)]
enum Chunk {
    Raw(u64), // Offset into the file
    Fill([u8; 4]),
    DontCare,
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct ChunkWithSize {
    chunk: Chunk,
    expanded_size: u64,
}

/* Following a Raw or Fill or CRC32 chunk is data.
 * For a Raw chunk, it's the data in chunk_sz * blk_sz.
 * For a Fill chunk, it's 4 bytes of the fill data.
 * For a CRC32 chunk, it's 4 bytes of CRC32
 */
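/* For example, with blk_sz = 4096, a Fill chunk with chunk_sz = 2 stores only
 * 4 pattern bytes in the input file but expands to 8192 bytes (the pattern
 * repeated) in the output image.
 */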
#[derive(Debug)]
pub struct AndroidSparse {
    file: File,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

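/// Parses one chunk from the current position of `input`.
///
/// Returns `Ok(Some(_))` for raw, fill, and "don't care" chunks, and
/// `Ok(None)` for CRC32 chunks, whose checksums are currently skipped rather
/// than validated.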
fn parse_chunk<T: Read + Seek>(input: &mut T, blk_sz: u64) -> Result<Option<ChunkWithSize>> {
    const HEADER_SIZE: usize = mem::size_of::<ChunkHeader>();
    let current_offset = input
        .stream_position()
        .map_err(Error::ReadSpecificationError)?;
    let mut chunk_header = ChunkHeader::new_zeroed();
    input
        .read_exact(chunk_header.as_mut_bytes())
        .map_err(Error::ReadSpecificationError)?;
    let chunk_body_size = (chunk_header.total_sz.to_native() as usize)
        .checked_sub(HEADER_SIZE)
        .ok_or(Error::InvalidSpecification(format!(
            "chunk total_sz {} smaller than header size {}",
            chunk_header.total_sz.to_native(),
            HEADER_SIZE
        )))?;
    let chunk = match chunk_header.chunk_type.to_native() {
        CHUNK_TYPE_RAW => {
            input
                .seek(SeekFrom::Current(chunk_body_size as i64))
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Raw(current_offset + HEADER_SIZE as u64)
        }
        CHUNK_TYPE_FILL => {
            let mut fill_bytes = [0u8; 4];
            if chunk_body_size != fill_bytes.len() {
                return Err(Error::InvalidSpecification(format!(
                    "Fill chunk had bad size. Expected {}, was {}",
                    fill_bytes.len(),
                    chunk_body_size
                )));
            }
            input
                .read_exact(&mut fill_bytes)
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Fill(fill_bytes)
        }
        CHUNK_TYPE_DONT_CARE => Chunk::DontCare,
        CHUNK_TYPE_CRC32 => return Ok(None), // TODO(schuffelen): Validate crc32s in input
        unknown_type => {
            return Err(Error::InvalidSpecification(format!(
                "Chunk had invalid type, was {:x}",
                unknown_type
            )))
        }
    };
    let expanded_size = chunk_header.chunk_sz.to_native() as u64 * blk_sz;
    Ok(Some(ChunkWithSize {
        chunk,
        expanded_size,
    }))
}

impl AndroidSparse {
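    /// Parses the sparse header and all chunk headers from `file`, leaving raw
    /// chunk payloads in place to be read on demand.
    ///
    /// Illustrative usage (a sketch; assumes `sparse.img` exists and that this
    /// type is reachable as `disk::AndroidSparse`):
    ///
    /// ```ignore
    /// let file = std::fs::File::open("sparse.img").unwrap();
    /// let image = disk::AndroidSparse::from_file(file).unwrap();
    /// // `get_len` reports the expanded size: total_blks * blk_sz.
    /// let len = image.get_len().unwrap();
    /// ```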
    pub fn from_file(mut file: File) -> Result<AndroidSparse> {
        file.seek(SeekFrom::Start(0))
            .map_err(Error::ReadSpecificationError)?;
        let mut sparse_header = SparseHeader::new_zeroed();
        file.read_exact(sparse_header.as_mut_bytes())
            .map_err(Error::ReadSpecificationError)?;
        if sparse_header.magic != SPARSE_HEADER_MAGIC {
            return Err(Error::InvalidSpecification(format!(
                "Header did not match magic constant. Expected {:x}, was {:x}",
                SPARSE_HEADER_MAGIC,
                sparse_header.magic.to_native()
            )));
        } else if sparse_header.major_version != MAJOR_VERSION {
            return Err(Error::InvalidSpecification(format!(
                "Header major version did not match. Expected {}, was {}",
                MAJOR_VERSION,
                sparse_header.major_version.to_native(),
            )));
        } else if sparse_header.chunk_hdr_size.to_native() as usize != mem::size_of::<ChunkHeader>()
        {
            // The canonical parser for this format allows `chunk_hdr_size >= sizeof(ChunkHeader)`,
            // but we've chosen to be stricter for simplicity.
            return Err(Error::InvalidSpecification(format!(
                "Chunk header size does not match chunk header struct, expected {}, was {}",
                mem::size_of::<ChunkHeader>(),
                sparse_header.chunk_hdr_size.to_native()
            )));
        }
        let block_size = sparse_header.blk_sz.to_native() as u64;
        let chunks = (0..sparse_header.total_chunks.to_native())
            .filter_map(|_| parse_chunk(&mut file, block_size).transpose())
            .collect::<Result<Vec<ChunkWithSize>>>()?;
        let total_size =
            sparse_header.total_blks.to_native() as u64 * sparse_header.blk_sz.to_native() as u64;
        AndroidSparse::from_parts(file, total_size, chunks)
    }

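    /// Assembles an image from `chunks`, keying each chunk by its starting
    /// offset in the expanded output image. For example, two chunks with
    /// expanded sizes 100 and 50 are keyed at 0 and 100, and their total must
    /// match the header-declared `size`.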
    fn from_parts(file: File, size: u64, chunks: Vec<ChunkWithSize>) -> Result<AndroidSparse> {
        let mut chunks_map: BTreeMap<u64, ChunkWithSize> = BTreeMap::new();
        let mut expanded_location: u64 = 0;
        for chunk_with_size in chunks {
            let size = chunk_with_size.expanded_size;
            if chunks_map
                .insert(expanded_location, chunk_with_size)
                .is_some()
            {
                return Err(Error::InvalidSpecification(format!(
                    "Two chunks were at {}",
                    expanded_location
                )));
            }
            expanded_location += size;
        }
        let image = AndroidSparse {
            file,
            total_size: size,
            chunks: chunks_map,
        };
        let calculated_len: u64 = image.chunks.iter().map(|x| x.1.expanded_size).sum();
        if calculated_len != size {
            return Err(Error::InvalidSpecification(format!(
                "Header promised size {}, chunks added up to {}",
                size, calculated_len
            )));
        }
        Ok(image)
    }
}

impl DiskGetLen for AndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl AsRawDescriptor for AndroidSparse {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.file.as_raw_descriptor()
    }
}

// Performs reads up to the chunk boundary.
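// For example, a 10-byte read starting 4 bytes before a chunk boundary returns
// only those 4 bytes; callers that need the rest issue further reads (the
// `read_exact_at_volatile` helper used in the tests loops until the slice is
// full).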
impl FileReadWriteAtVolatile for AndroidSparse {
    fn read_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
        let found_chunk = self.chunks.range(..=offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or_else(|| {
            io::Error::new(
                ErrorKind::UnexpectedEof,
                format!("no chunk for offset {}", offset),
            )
        })?;
        let chunk_offset = offset - chunk_start;
        let chunk_size = *expanded_size;
        let subslice = if chunk_offset + (slice.size() as u64) > chunk_size {
            slice
                .sub_slice(0, (chunk_size - chunk_offset) as usize)
                .map_err(|e| io::Error::new(ErrorKind::InvalidData, format!("{:?}", e)))?
        } else {
            slice
        };
        match chunk {
            Chunk::DontCare => {
                subslice.write_bytes(0);
                Ok(subslice.size())
            }
            Chunk::Raw(file_offset) => self
                .file
                .read_at_volatile(subslice, *file_offset + chunk_offset),
            Chunk::Fill(fill_bytes) => {
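                // Align the repeating 4-byte pattern with the read offset: for
                // pattern [10, 20, 30, 40], a read at chunk offset 1 yields
                // 20, 30, 40, 10, 20, ...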
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(subslice.size())
                    .collect();
                subslice.copy_from(&filled_memory);
                Ok(subslice.size())
            }
        }
    }
    fn write_at_volatile(&self, _slice: VolatileSlice, _offset: u64) -> io::Result<usize> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

// TODO(b/271381851): implement `try_clone`. It allows virtio-blk to run multiple workers.
impl DiskFile for AndroidSparse {}

/// An Android Sparse disk that implements `AsyncDisk` for access.
pub struct AsyncAndroidSparse {
    inner: IoSource<File>,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

impl ToAsyncDisk for AndroidSparse {
    fn to_async_disk(self: Box<Self>, ex: &Executor) -> DiskResult<Box<dyn AsyncDisk>> {
        Ok(Box::new(AsyncAndroidSparse {
            inner: ex.async_from(self.file).map_err(DiskError::ToAsync)?,
            total_size: self.total_size,
            chunks: self.chunks,
        }))
    }
}

impl DiskGetLen for AsyncAndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AsyncAndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl FileAllocate for AsyncAndroidSparse {
    fn allocate(&self, _offset: u64, _length: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

#[async_trait(?Send)]
impl AsyncDisk for AsyncAndroidSparse {
    async fn flush(&self) -> crate::Result<()> {
        // android sparse is read-only, nothing to flush.
        Ok(())
    }

    async fn fsync(&self) -> DiskResult<()> {
        // Do nothing because it's read-only.
        Ok(())
    }

    async fn fdatasync(&self) -> DiskResult<()> {
        // Do nothing because it's read-only.
        Ok(())
    }

    /// Reads data from `file_offset` to the end of the current chunk and writes it into memory
    /// `mem` at `mem_offsets`.
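    ///
    /// Like `read_at_volatile`, this may return fewer bytes than requested when
    /// the read crosses a chunk boundary; callers loop to read further, as
    /// `read_exact_at` in the tests below does.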
    async fn read_to_mem<'a>(
        &'a self,
        file_offset: u64,
        mem: Arc<dyn BackingMemory + Send + Sync>,
        mem_offsets: cros_async::MemRegionIter<'a>,
    ) -> DiskResult<usize> {
        let found_chunk = self.chunks.range(..=file_offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or(DiskError::ReadingData(io::Error::new(
            ErrorKind::UnexpectedEof,
            format!("no chunk for offset {}", file_offset),
        )))?;
        let chunk_offset = file_offset - chunk_start;
        let chunk_size = *expanded_size;

        // Truncate `mem_offsets` to the remaining size of the current chunk.
        let mem_offsets = mem_offsets.take_bytes((chunk_size - chunk_offset) as usize);
        let mem_size = mem_offsets.clone().map(|x| x.len).sum();
        match chunk {
            Chunk::DontCare => {
                for region in mem_offsets {
                    mem.get_volatile_slice(region)
                        .map_err(DiskError::GuestMemory)?
                        .write_bytes(0);
                }
                Ok(mem_size)
            }
            Chunk::Raw(offset) => self
                .inner
                .read_to_mem(Some(offset + chunk_offset), mem, mem_offsets)
                .await
                .map_err(DiskError::ReadToMem),
            Chunk::Fill(fill_bytes) => {
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(mem_size)
                    .collect();

                let mut filled_count = 0;
                for region in mem_offsets {
                    let buf = &filled_memory[filled_count..filled_count + region.len];
                    mem.get_volatile_slice(region)
                        .map_err(DiskError::GuestMemory)?
                        .copy_from(buf);
                    filled_count += region.len;
                }
                Ok(mem_size)
            }
        }
    }

    async fn write_from_mem<'a>(
        &'a self,
        _file_offset: u64,
        _mem: Arc<dyn BackingMemory + Send + Sync>,
        _mem_offsets: cros_async::MemRegionIter<'a>,
    ) -> DiskResult<usize> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn punch_hole(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn write_zeroes_at(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use std::io::Write;

    use super::*;

    const CHUNK_SIZE: usize = mem::size_of::<ChunkHeader>();

    #[test]
    fn parse_raw() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_RAW.into(),
            reserved1: 0,
            chunk_sz: 1.into(),
            total_sz: (CHUNK_SIZE as u32 + 123).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[0u8; 123]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Raw(CHUNK_SIZE as u64),
            expanded_size: 123,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_dont_care() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_DONT_CARE.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_cursor = Cursor::new(header_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_fill() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_FILL.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Fill([123, 123, 123, 123]),
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_crc32() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_CRC32.into(),
            reserved1: 0,
            chunk_sz: 0.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123).expect("Failed to parse");
        assert_eq!(None, chunk);
    }

    fn test_image(chunks: Vec<ChunkWithSize>) -> AndroidSparse {
        let file = tempfile::tempfile().expect("failed to create tempfile");
        let size = chunks.iter().map(|x| x.expanded_size).sum();
        AndroidSparse::from_parts(file, size, chunks).expect("Could not create image")
    }

    #[test]
    fn read_dontcare() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 100,
        }];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 100];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [0u8; 100];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_simple() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 10, 20]),
            expanded_size: 8,
        }];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 10, 20, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_edges() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 30, 40]),
            expanded_size: 8,
        }];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 6];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 1)
            .expect("Could not read");
        let expected = [20, 30, 40, 10, 20, 30];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_offset_edges() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 20,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 100,
            },
        ];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 7];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 39)
            .expect("Could not read");
        let expected = [40, 10, 20, 30, 40, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_raw() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Raw(0),
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        write!(image.file, "hello").expect("Failed to write into internal file");
        let mut input_memory = [55u8; 5];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [104, 101, 108, 108, 111];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_two_fills() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 4,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([30, 40, 30, 40]),
                expanded_size: 4,
            },
        ];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 30, 40, 30, 40];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    // Tests for the async `AsyncDisk` implementation.
    use cros_async::MemRegion;
    use cros_async::MemRegionIter;
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    fn test_async_image(
        chunks: Vec<ChunkWithSize>,
        ex: &Executor,
    ) -> DiskResult<Box<dyn AsyncDisk>> {
        Box::new(test_image(chunks)).to_async_disk(ex)
    }

    /// Reads `len` bytes of data from `image` at `offset`.
    async fn read_exact_at(image: &dyn AsyncDisk, offset: usize, len: usize) -> Vec<u8> {
        let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
        // Fill in guest_mem with dirty data.
        guest_mem
            .write_all_at_addr(&vec![55u8; len], GuestAddress(0))
            .unwrap();

        let mut count = 0usize;
        while count < len {
            let result = image
                .read_to_mem(
                    (offset + count) as u64,
                    guest_mem.clone(),
                    MemRegionIter::new(&[MemRegion {
                        offset: count as u64,
                        len: len - count,
                    }]),
                )
                .await;
            count += result.unwrap();
        }

        let mut buf = vec![0; len];
        guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
        buf
    }

    #[test]
    fn async_read_dontcare() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 100,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 100).await;
            assert!(buf.iter().all(|x| *x == 0));
        })
        .unwrap();
    }

    #[test]
    fn async_read_dontcare_with_offsets() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 10,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 0, 0, 0, 55, 55, 0, 0, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 10, 20, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 10, 20, 10, 55, 55, 20, 10, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 1, 6).await;
            let expected = [20, 30, 40, 10, 20, 30];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_offset_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::DontCare,
                    expanded_size: 20,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 30, 40]),
                    expanded_size: 100,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 39, 7).await;
            let expected = [40, 10, 20, 30, 40, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_raw() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            let buf = read_exact_at(&*async_image, 0, 5).await;
            let expected = [104, 101, 108, 108, 111];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_raw_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            async_image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 104, 101, 108, 55, 55, 108, 111, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_two_fills() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 10, 20]),
                    expanded_size: 4,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([30, 40, 30, 40]),
                    expanded_size: 4,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 30, 40, 30, 40];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }
}