// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// https://android.googlesource.com/platform/system/core/+/7b444f0/libsparse/sparse_format.h

use std::collections::BTreeMap;
use std::fs::File;
use std::io;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::mem;
use std::sync::Arc;

use async_trait::async_trait;
use base::AsRawDescriptor;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::RawDescriptor;
use base::VolatileSlice;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSource;
use data_model::Le16;
use data_model::Le32;
use remain::sorted;
use thiserror::Error;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;

use crate::AsyncDisk;
use crate::DiskFile;
use crate::DiskGetLen;
use crate::Error as DiskError;
use crate::Result as DiskResult;
use crate::ToAsyncDisk;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid magic header for android sparse format")]
    InvalidMagicHeader,
    #[error("invalid specification: \"{0}\"")]
    InvalidSpecification(String),
    #[error("failed to read specification: \"{0}\"")]
    ReadSpecificationError(io::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

pub const SPARSE_HEADER_MAGIC: u32 = 0xed26ff3a;
const MAJOR_VERSION: u16 = 1;

#[repr(C)]
#[derive(Clone, Copy, Debug, AsBytes, FromZeroes, FromBytes)]
struct SparseHeader {
    magic: Le32,          // SPARSE_HEADER_MAGIC
    major_version: Le16,  // (0x1) - reject images with higher major versions
    minor_version: Le16,  // (0x0) - allow images with higher minor versions
    file_hdr_sz: Le16,    // 28 bytes for first revision of the file format
    chunk_hdr_size: Le16, // 12 bytes for first revision of the file format
    blk_sz: Le32,         // block size in bytes, must be a multiple of 4 (4096)
    total_blks: Le32,     // total blocks in the non-sparse output image
    total_chunks: Le32,   // total chunks in the sparse input image
    // CRC32 checksum of the original data, counting "don't care" as 0. Standard 802.3 polynomial,
    // use a Public Domain table implementation
    image_checksum: Le32,
}
73
74 const CHUNK_TYPE_RAW: u16 = 0xCAC1;
75 const CHUNK_TYPE_FILL: u16 = 0xCAC2;
76 const CHUNK_TYPE_DONT_CARE: u16 = 0xCAC3;
77 const CHUNK_TYPE_CRC32: u16 = 0xCAC4;
78
79 #[repr(C)]
80 #[derive(Clone, Copy, Debug, AsBytes, FromZeroes, FromBytes)]
81 struct ChunkHeader {
82 chunk_type: Le16, /* 0xCAC1 -> raw; 0xCAC2 -> fill; 0xCAC3 -> don't care */
83 reserved1: u16,
84 chunk_sz: Le32, /* in blocks in output image */
85 total_sz: Le32, /* in bytes of chunk input file including chunk header and data */
86 }
87
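/// A parsed chunk in the form needed to serve reads: `Raw` stores the offset of the chunk's
/// data within the sparse file rather than the data itself.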
#[derive(Clone, Debug, PartialEq, Eq)]
enum Chunk {
    Raw(u64), // Offset into the file
    Fill([u8; 4]),
    DontCare,
}

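/// A chunk paired with the number of bytes it expands to in the output image.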
#[derive(Clone, Debug, PartialEq, Eq)]
struct ChunkWithSize {
    chunk: Chunk,
    expanded_size: u64,
}

/* Following a Raw or Fill or CRC32 chunk is data.
 * For a Raw chunk, it's the data in chunk_sz * blk_sz.
 * For a Fill chunk, it's 4 bytes of the fill data.
 * For a CRC32 chunk, it's 4 bytes of CRC32
 */
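/// A read-only disk image in Android sparse format. The chunk list is parsed up front into a
/// map keyed by each chunk's starting offset in the expanded (output) image; reads are served
/// by looking up the chunk that covers the requested offset.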
#[derive(Debug)]
pub struct AndroidSparse {
    file: File,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

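/// Parses one chunk header (and any inline payload) from `input`, leaving the cursor at the
/// start of the next chunk header. Returns `Ok(None)` for CRC32 chunks, which are skipped
/// rather than validated.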
fn parse_chunk<T: Read + Seek>(input: &mut T, blk_sz: u64) -> Result<Option<ChunkWithSize>> {
    const HEADER_SIZE: usize = mem::size_of::<ChunkHeader>();
    let current_offset = input
        .stream_position()
        .map_err(Error::ReadSpecificationError)?;
    let mut chunk_header = ChunkHeader::new_zeroed();
    input
        .read_exact(chunk_header.as_bytes_mut())
        .map_err(Error::ReadSpecificationError)?;
    let chunk_body_size = (chunk_header.total_sz.to_native() as usize)
        .checked_sub(HEADER_SIZE)
        .ok_or(Error::InvalidSpecification(format!(
            "chunk total_sz {} smaller than header size {}",
            chunk_header.total_sz.to_native(),
            HEADER_SIZE
        )))?;
    let chunk = match chunk_header.chunk_type.to_native() {
        CHUNK_TYPE_RAW => {
            input
                .seek(SeekFrom::Current(chunk_body_size as i64))
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Raw(current_offset + HEADER_SIZE as u64)
        }
        CHUNK_TYPE_FILL => {
            let mut fill_bytes = [0u8; 4];
            if chunk_body_size != fill_bytes.len() {
                return Err(Error::InvalidSpecification(format!(
                    "Fill chunk had bad size. Expected {}, was {}",
                    fill_bytes.len(),
                    chunk_body_size
                )));
            }
            input
                .read_exact(&mut fill_bytes)
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Fill(fill_bytes)
        }
        CHUNK_TYPE_DONT_CARE => Chunk::DontCare,
        CHUNK_TYPE_CRC32 => {
            // TODO(schuffelen): Validate crc32s in input
            // Skip the 4-byte CRC payload so the stream stays positioned at the next chunk
            // header even if a CRC32 chunk is not the last chunk in the image.
            input
                .seek(SeekFrom::Current(chunk_body_size as i64))
                .map_err(Error::ReadSpecificationError)?;
            return Ok(None);
        }
        unknown_type => {
            return Err(Error::InvalidSpecification(format!(
                "Chunk had invalid type, was {:x}",
                unknown_type
            )))
        }
    };
    let expanded_size = chunk_header.chunk_sz.to_native() as u64 * blk_sz;
    Ok(Some(ChunkWithSize {
        chunk,
        expanded_size,
    }))
}

impl AndroidSparse {
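    /// Parses `file` as an Android sparse image, validating the header and indexing every
    /// chunk by its offset in the expanded image.
    ///
    /// A minimal usage sketch (the image path is illustrative, and the sketch assumes this
    /// type is reachable from the caller's crate):
    ///
    /// ```ignore
    /// use std::fs::File;
    ///
    /// let file = File::open("system.simg").unwrap(); // hypothetical sparse image path
    /// let image = AndroidSparse::from_file(file).unwrap();
    /// ```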
    pub fn from_file(mut file: File) -> Result<AndroidSparse> {
        file.seek(SeekFrom::Start(0))
            .map_err(Error::ReadSpecificationError)?;
        let mut sparse_header = SparseHeader::new_zeroed();
        file.read_exact(sparse_header.as_bytes_mut())
            .map_err(Error::ReadSpecificationError)?;
        if sparse_header.magic != SPARSE_HEADER_MAGIC {
            return Err(Error::InvalidSpecification(format!(
                "Header did not match magic constant. Expected {:x}, was {:x}",
                SPARSE_HEADER_MAGIC,
                sparse_header.magic.to_native()
            )));
        } else if sparse_header.major_version != MAJOR_VERSION {
            return Err(Error::InvalidSpecification(format!(
                "Header major version did not match. Expected {}, was {}",
                MAJOR_VERSION,
                sparse_header.major_version.to_native(),
            )));
        } else if sparse_header.chunk_hdr_size.to_native() as usize != mem::size_of::<ChunkHeader>()
        {
            // The canonical parser for this format allows `chunk_hdr_size >= sizeof(ChunkHeader)`,
            // but we've chosen to be stricter for simplicity.
            return Err(Error::InvalidSpecification(format!(
                "Chunk header size does not match chunk header struct, expected {}, was {}",
                mem::size_of::<ChunkHeader>(),
                sparse_header.chunk_hdr_size.to_native()
            )));
        }
        let block_size = sparse_header.blk_sz.to_native() as u64;
        let chunks = (0..sparse_header.total_chunks.to_native())
            .filter_map(|_| parse_chunk(&mut file, block_size).transpose())
            .collect::<Result<Vec<ChunkWithSize>>>()?;
        let total_size =
            sparse_header.total_blks.to_native() as u64 * sparse_header.blk_sz.to_native() as u64;
        AndroidSparse::from_parts(file, total_size, chunks)
    }

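    /// Builds the offset-to-chunk map and verifies that the chunks exactly cover `size` bytes
    /// of the expanded image.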
    fn from_parts(file: File, size: u64, chunks: Vec<ChunkWithSize>) -> Result<AndroidSparse> {
        let mut chunks_map: BTreeMap<u64, ChunkWithSize> = BTreeMap::new();
        let mut expanded_location: u64 = 0;
        for chunk_with_size in chunks {
            let size = chunk_with_size.expanded_size;
            if chunks_map
                .insert(expanded_location, chunk_with_size)
                .is_some()
            {
                return Err(Error::InvalidSpecification(format!(
                    "Two chunks were at {}",
                    expanded_location
                )));
            }
            expanded_location += size;
        }
        let image = AndroidSparse {
            file,
            total_size: size,
            chunks: chunks_map,
        };
        let calculated_len: u64 = image.chunks.iter().map(|x| x.1.expanded_size).sum();
        if calculated_len != size {
            return Err(Error::InvalidSpecification(format!(
                "Header promised size {}, chunks added up to {}",
                size, calculated_len
            )));
        }
        Ok(image)
    }
}

impl DiskGetLen for AndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl AsRawDescriptor for AndroidSparse {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.file.as_raw_descriptor()
    }
}

// Performs reads up to the chunk boundary.
impl FileReadWriteAtVolatile for AndroidSparse {
    fn read_at_volatile(&mut self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
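        // Find the chunk that covers `offset`: the last entry whose start offset in the
        // expanded image is <= `offset`.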
        let found_chunk = self.chunks.range(..=offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or_else(|| {
            io::Error::new(
                ErrorKind::UnexpectedEof,
                format!("no chunk for offset {}", offset),
            )
        })?;
        let chunk_offset = offset - chunk_start;
        let chunk_size = *expanded_size;
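        // Truncate the read at the end of the current chunk. This produces a short read;
        // callers such as `read_exact_at_volatile` loop to cross chunk boundaries.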
        let subslice = if chunk_offset + (slice.size() as u64) > chunk_size {
            slice
                .sub_slice(0, (chunk_size - chunk_offset) as usize)
                .map_err(|e| io::Error::new(ErrorKind::InvalidData, format!("{:?}", e)))?
        } else {
            slice
        };
        match chunk {
            Chunk::DontCare => {
                subslice.write_bytes(0);
                Ok(subslice.size())
            }
            Chunk::Raw(file_offset) => self
                .file
                .read_at_volatile(subslice, *file_offset + chunk_offset),
            Chunk::Fill(fill_bytes) => {
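                // The 4-byte fill pattern repeats from the start of the chunk, so a read at an
                // unaligned offset must begin partway through the pattern.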
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(subslice.size())
                    .collect();
                subslice.copy_from(&filled_memory);
                Ok(subslice.size())
            }
        }
    }

    fn write_at_volatile(&mut self, _slice: VolatileSlice, _offset: u64) -> io::Result<usize> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

// TODO(b/271381851): implement `try_clone`. It allows virtio-blk to run multiple workers.
impl DiskFile for AndroidSparse {}

/// An Android Sparse disk that implements `AsyncDisk` for access.
pub struct AsyncAndroidSparse {
    inner: IoSource<File>,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

impl ToAsyncDisk for AndroidSparse {
    fn to_async_disk(self: Box<Self>, ex: &Executor) -> DiskResult<Box<dyn AsyncDisk>> {
        Ok(Box::new(AsyncAndroidSparse {
            inner: ex.async_from(self.file).map_err(DiskError::ToAsync)?,
            total_size: self.total_size,
            chunks: self.chunks,
        }))
    }
}

impl DiskGetLen for AsyncAndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AsyncAndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl FileAllocate for AsyncAndroidSparse {
    fn allocate(&mut self, _offset: u64, _length: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

#[async_trait(?Send)]
impl AsyncDisk for AsyncAndroidSparse {
    fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
        Box::new(AndroidSparse {
            file: self.inner.into_source(),
            total_size: self.total_size,
            chunks: self.chunks,
        })
    }

    async fn flush(&self) -> crate::Result<()> {
        // android sparse is read-only, nothing to flush.
        Ok(())
    }

    async fn fsync(&self) -> DiskResult<()> {
        // Do nothing because it's read-only.
        Ok(())
    }

    async fn fdatasync(&self) -> DiskResult<()> {
        // Do nothing because it's read-only.
        Ok(())
    }

    /// Reads data from `file_offset` to the end of the current chunk and writes it into guest
    /// memory `mem` at `mem_offsets`.
    async fn read_to_mem<'a>(
        &'a self,
        file_offset: u64,
        mem: Arc<dyn BackingMemory + Send + Sync>,
        mem_offsets: cros_async::MemRegionIter<'a>,
    ) -> DiskResult<usize> {
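        // As in the synchronous path, locate the chunk covering `file_offset`.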
        let found_chunk = self.chunks.range(..=file_offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or(DiskError::ReadingData(io::Error::new(
            ErrorKind::UnexpectedEof,
            format!("no chunk for offset {}", file_offset),
        )))?;
        let chunk_offset = file_offset - chunk_start;
        let chunk_size = *expanded_size;

        // Truncate `mem_offsets` to the remaining size of the current chunk.
        let mem_offsets = mem_offsets.take_bytes((chunk_size - chunk_offset) as usize);
        let mem_size = mem_offsets.clone().map(|x| x.len).sum();
        match chunk {
            Chunk::DontCare => {
                for region in mem_offsets {
                    mem.get_volatile_slice(region)
                        .map_err(DiskError::GuestMemory)?
                        .write_bytes(0);
                }
                Ok(mem_size)
            }
            Chunk::Raw(offset) => self
                .inner
                .read_to_mem(Some(offset + chunk_offset), mem, mem_offsets)
                .await
                .map_err(DiskError::ReadToMem),
            Chunk::Fill(fill_bytes) => {
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(mem_size)
                    .collect();

                let mut filled_count = 0;
                for region in mem_offsets {
                    let buf = &filled_memory[filled_count..filled_count + region.len];
                    mem.get_volatile_slice(region)
                        .map_err(DiskError::GuestMemory)?
                        .copy_from(buf);
                    filled_count += region.len;
                }
                Ok(mem_size)
            }
        }
    }

    async fn write_from_mem<'a>(
        &'a self,
        _file_offset: u64,
        _mem: Arc<dyn BackingMemory + Send + Sync>,
        _mem_offsets: cros_async::MemRegionIter<'a>,
    ) -> DiskResult<usize> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn punch_hole(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn write_zeroes_at(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use std::io::Write;

    use super::*;

    const CHUNK_SIZE: usize = mem::size_of::<ChunkHeader>();

    #[test]
    fn parse_raw() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_RAW.into(),
            reserved1: 0,
            chunk_sz: 1.into(),
            total_sz: (CHUNK_SIZE as u32 + 123).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[0u8; 123]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Raw(CHUNK_SIZE as u64),
            expanded_size: 123,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_dont_care() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_DONT_CARE.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_cursor = Cursor::new(header_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_fill() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_FILL.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Fill([123, 123, 123, 123]),
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_crc32() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_CRC32.into(),
            reserved1: 0,
            chunk_sz: 0.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123).expect("Failed to parse");
        assert_eq!(None, chunk);
    }

    fn test_image(chunks: Vec<ChunkWithSize>) -> AndroidSparse {
        let file = tempfile::tempfile().expect("failed to create tempfile");
        let size = chunks.iter().map(|x| x.expanded_size).sum();
        AndroidSparse::from_parts(file, size, chunks).expect("Could not create image")
    }

    #[test]
    fn read_dontcare() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 100];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [0u8; 100];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_simple() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 10, 20]),
            expanded_size: 8,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 10, 20, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_edges() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 30, 40]),
            expanded_size: 8,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 6];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 1)
            .expect("Could not read");
        let expected = [20, 30, 40, 10, 20, 30];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_offset_edges() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 20,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 100,
            },
        ];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 7];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 39)
            .expect("Could not read");
        let expected = [40, 10, 20, 30, 40, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_raw() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Raw(0),
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        write!(image.file, "hello").expect("Failed to write into internal file");
        let mut input_memory = [55u8; 5];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [104, 101, 108, 108, 111];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_two_fills() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 4,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([30, 40, 30, 40]),
                expanded_size: 4,
            },
        ];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 30, 40, 30, 40];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    /**
     * Tests for Async.
     */
    use cros_async::MemRegion;
    use cros_async::MemRegionIter;
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    fn test_async_image(
        chunks: Vec<ChunkWithSize>,
        ex: &Executor,
    ) -> DiskResult<Box<dyn AsyncDisk>> {
        Box::new(test_image(chunks)).to_async_disk(ex)
    }

    /// Reads `len` bytes of data from `image` at `offset`.
    async fn read_exact_at(image: &dyn AsyncDisk, offset: usize, len: usize) -> Vec<u8> {
        let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
        // Fill in guest_mem with dirty data.
        guest_mem
            .write_all_at_addr(&vec![55u8; len], GuestAddress(0))
            .unwrap();

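        // `read_to_mem` returns short reads at chunk boundaries, so loop until all `len`
        // bytes have been read.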
        let mut count = 0usize;
        while count < len {
            let result = image
                .read_to_mem(
                    (offset + count) as u64,
                    guest_mem.clone(),
                    MemRegionIter::new(&[MemRegion {
                        offset: count as u64,
                        len: len - count,
                    }]),
                )
                .await;
            count += result.unwrap();
        }

        let mut buf = vec![0; len];
        guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
        buf
    }

    #[test]
    fn async_read_dontcare() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 100,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 100).await;
            assert!(buf.iter().all(|x| *x == 0));
        })
        .unwrap();
    }

    #[test]
    fn async_read_dontcare_with_offsets() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 10,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 0, 0, 0, 55, 55, 0, 0, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 10, 20, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 10, 20, 10, 55, 55, 20, 10, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 1, 6).await;
            let expected = [20, 30, 40, 10, 20, 30];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_offset_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::DontCare,
                    expanded_size: 20,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 30, 40]),
                    expanded_size: 100,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 39, 7).await;
            let expected = [40, 10, 20, 30, 40, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_raw() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            let buf = read_exact_at(&*async_image, 0, 5).await;
            let expected = [104, 101, 108, 108, 111];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_raw_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            async_image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 104, 101, 108, 55, 55, 108, 111, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_two_fills() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 10, 20]),
                    expanded_size: 4,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([30, 40, 30, 40]),
                    expanded_size: 4,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 30, 40, 30, 40];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }

    // Convert to sync and back again. There was once a bug where `into_inner` converted the
    // AndroidSparse into a raw file.
    //
    // Skip on windows because `into_source` isn't supported.
    #[cfg(not(windows))]
    #[test]
    fn async_roundtrip_read_dontcare() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 100,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let image = image.into_inner().to_async_disk(&ex).unwrap();
            let buf = read_exact_at(&*image, 0, 100).await;
            assert!(buf.iter().all(|x| *x == 0));
        })
        .unwrap();
    }
}