// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// https://android.googlesource.com/platform/system/core/+/7b444f0/libsparse/sparse_format.h

use std::collections::BTreeMap;
use std::fs::File;
use std::io;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::mem;
use std::sync::Arc;

use async_trait::async_trait;
use base::AsRawDescriptor;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::RawDescriptor;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSource;
use data_model::DataInit;
use data_model::Le16;
use data_model::Le32;
use data_model::VolatileSlice;
use remain::sorted;
use thiserror::Error;

use crate::AsyncDisk;
use crate::DiskFile;
use crate::DiskGetLen;
use crate::Error as DiskError;
use crate::Result as DiskResult;
use crate::ToAsyncDisk;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid magic header for android sparse format")]
    InvalidMagicHeader,
    #[error("invalid specification: \"{0}\"")]
    InvalidSpecification(String),
    #[error("failed to read specification: \"{0}\"")]
    ReadSpecificationError(io::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

pub const SPARSE_HEADER_MAGIC: u32 = 0xed26ff3a;
const MAJOR_VERSION: u16 = 1;

#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct SparseHeader {
    magic: Le32,          /* SPARSE_HEADER_MAGIC */
    major_version: Le16,  /* (0x1) - reject images with higher major versions */
    minor_version: Le16,  /* (0x0) - allow images with higher minor versions */
    file_hdr_sz: Le16,    /* 28 bytes for first revision of the file format */
    chunk_hdr_size: Le16, /* 12 bytes for first revision of the file format */
    blk_sz: Le32,         /* block size in bytes, must be a multiple of 4 (4096) */
    total_blks: Le32,     /* total blocks in the non-sparse output image */
    total_chunks: Le32,   /* total chunks in the sparse input image */
    image_checksum: Le32, /* CRC32 checksum of the original data, counting "don't care" */
                          /* as 0. Standard 802.3 polynomial, use a Public Domain */
                          /* table implementation */
}

unsafe impl DataInit for SparseHeader {}

const CHUNK_TYPE_RAW: u16 = 0xCAC1;
const CHUNK_TYPE_FILL: u16 = 0xCAC2;
const CHUNK_TYPE_DONT_CARE: u16 = 0xCAC3;
const CHUNK_TYPE_CRC32: u16 = 0xCAC4;

#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct ChunkHeader {
    chunk_type: Le16, /* 0xCAC1 -> raw; 0xCAC2 -> fill; 0xCAC3 -> don't care */
    reserved1: u16,
    chunk_sz: Le32,   /* in blocks in output image */
    total_sz: Le32,   /* in bytes of chunk input file including chunk header and data */
}

unsafe impl DataInit for ChunkHeader {}

#[derive(Clone, Debug, PartialEq, Eq)]
enum Chunk {
    Raw(u64), // Offset into the file
    Fill([u8; 4]),
    DontCare,
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct ChunkWithSize {
    chunk: Chunk,
    expanded_size: u64,
}

/* Following a Raw or Fill or CRC32 chunk is data.
 * For a Raw chunk, it's the data in chunk_sz * blk_sz.
 * For a Fill chunk, it's 4 bytes of the fill data.
 * For a CRC32 chunk, it's 4 bytes of CRC32
 */
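/* For example (an illustrative encoding derived from the layout above, not a dump of a real
 * image): a fill chunk covering one 4096-byte block is the 12-byte chunk header
 * (chunk_type 0xCAC2, chunk_sz 1, total_sz 16) followed by the 4 fill bytes.
 */

/// A disk image in the Android sparse format.
///
/// `chunks` maps each chunk's start offset in the expanded (non-sparse) image to the chunk
/// itself, so reads can locate the covering chunk with a single `BTreeMap` range lookup.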
#[derive(Debug)]
pub struct AndroidSparse {
    file: File,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

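/// Parses a single chunk from `input`. Returns `Ok(None)` for CRC32 chunks, which contribute no
/// data to the expanded image.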
fn parse_chunk<T: Read + Seek>(mut input: &mut T, blk_sz: u64) -> Result<Option<ChunkWithSize>> {
    const HEADER_SIZE: usize = mem::size_of::<ChunkHeader>();
    let current_offset = input
        .seek(SeekFrom::Current(0))
        .map_err(Error::ReadSpecificationError)?;
    let chunk_header =
        ChunkHeader::from_reader(&mut input).map_err(Error::ReadSpecificationError)?;
    let chunk_body_size = (chunk_header.total_sz.to_native() as usize)
        .checked_sub(HEADER_SIZE)
        .ok_or(Error::InvalidSpecification(format!(
            "chunk total_sz {} smaller than header size {}",
            chunk_header.total_sz.to_native(),
            HEADER_SIZE
        )))?;
    let chunk = match chunk_header.chunk_type.to_native() {
        CHUNK_TYPE_RAW => {
            input
                .seek(SeekFrom::Current(chunk_body_size as i64))
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Raw(current_offset + HEADER_SIZE as u64)
        }
        CHUNK_TYPE_FILL => {
            let mut fill_bytes = [0u8; 4];
            if chunk_body_size != fill_bytes.len() {
                return Err(Error::InvalidSpecification(format!(
                    "Fill chunk had bad size. Expected {}, was {}",
                    fill_bytes.len(),
                    chunk_body_size
                )));
            }
            input
                .read_exact(&mut fill_bytes)
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Fill(fill_bytes)
        }
        CHUNK_TYPE_DONT_CARE => Chunk::DontCare,
        CHUNK_TYPE_CRC32 => return Ok(None), // TODO(schuffelen): Validate crc32s in input
        unknown_type => {
            return Err(Error::InvalidSpecification(format!(
                "Chunk had invalid type, was {:x}",
                unknown_type
            )))
        }
    };
    let expanded_size = chunk_header.chunk_sz.to_native() as u64 * blk_sz;
    Ok(Some(ChunkWithSize {
        chunk,
        expanded_size,
    }))
}

impl AndroidSparse {
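    /// Parses the sparse header and chunk index from `file`, keeping the file open for later
    /// reads of `Raw` chunk data. A minimal usage sketch (the path is hypothetical):
    ///
    /// ```ignore
    /// let file = std::fs::File::open("disk.simg")?;
    /// let image = AndroidSparse::from_file(file)?;
    /// ```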
    pub fn from_file(mut file: File) -> Result<AndroidSparse> {
        file.seek(SeekFrom::Start(0))
            .map_err(Error::ReadSpecificationError)?;
        let sparse_header =
            SparseHeader::from_reader(&mut file).map_err(Error::ReadSpecificationError)?;
        if sparse_header.magic != SPARSE_HEADER_MAGIC {
            return Err(Error::InvalidSpecification(format!(
                "Header did not match magic constant. Expected {:x}, was {:x}",
                SPARSE_HEADER_MAGIC,
                sparse_header.magic.to_native()
            )));
        } else if sparse_header.major_version != MAJOR_VERSION {
            return Err(Error::InvalidSpecification(format!(
                "Header major version did not match. Expected {}, was {}",
                MAJOR_VERSION,
                sparse_header.major_version.to_native(),
            )));
        } else if sparse_header.chunk_hdr_size.to_native() as usize != mem::size_of::<ChunkHeader>()
        {
            // The canonical parser for this format allows `chunk_hdr_size >= sizeof(ChunkHeader)`,
            // but we've chosen to be stricter for simplicity.
            return Err(Error::InvalidSpecification(format!(
                "Chunk header size does not match chunk header struct, expected {}, was {}",
                mem::size_of::<ChunkHeader>(),
                sparse_header.chunk_hdr_size.to_native()
            )));
        }
        let block_size = sparse_header.blk_sz.to_native() as u64;
        let chunks = (0..sparse_header.total_chunks.to_native())
            .filter_map(|_| parse_chunk(&mut file, block_size).transpose())
            .collect::<Result<Vec<ChunkWithSize>>>()?;
        let total_size =
            sparse_header.total_blks.to_native() as u64 * sparse_header.blk_sz.to_native() as u64;
        AndroidSparse::from_parts(file, total_size, chunks)
    }

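    /// Builds the expanded-offset chunk map and checks that the chunk sizes sum to the size
    /// promised by the header.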
    fn from_parts(file: File, size: u64, chunks: Vec<ChunkWithSize>) -> Result<AndroidSparse> {
        let mut chunks_map: BTreeMap<u64, ChunkWithSize> = BTreeMap::new();
        let mut expanded_location: u64 = 0;
        for chunk_with_size in chunks {
            let size = chunk_with_size.expanded_size;
            if chunks_map
                .insert(expanded_location, chunk_with_size)
                .is_some()
            {
                return Err(Error::InvalidSpecification(format!(
                    "Two chunks were at {}",
                    expanded_location
                )));
            }
            expanded_location += size;
        }
        let image = AndroidSparse {
            file,
            total_size: size,
            chunks: chunks_map,
        };
        let calculated_len: u64 = image.chunks.iter().map(|x| x.1.expanded_size).sum();
        if calculated_len != size {
            return Err(Error::InvalidSpecification(format!(
                "Header promised size {}, chunks added up to {}",
                size, calculated_len
            )));
        }
        Ok(image)
    }
}

impl DiskGetLen for AndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl AsRawDescriptor for AndroidSparse {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.file.as_raw_descriptor()
    }
}

// Performs reads up to the chunk boundary.
impl FileReadWriteAtVolatile for AndroidSparse {
    fn read_at_volatile(&mut self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
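        // Find the chunk containing `offset`: the last chunk whose expanded start is <= `offset`.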
        let found_chunk = self.chunks.range(..=offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or_else(|| {
            io::Error::new(
                ErrorKind::UnexpectedEof,
                format!("no chunk for offset {}", offset),
            )
        })?;
        let chunk_offset = offset - chunk_start;
        let chunk_size = *expanded_size;
        let subslice = if chunk_offset + (slice.size() as u64) > chunk_size {
            slice
                .sub_slice(0, (chunk_size - chunk_offset) as usize)
                .map_err(|e| io::Error::new(ErrorKind::InvalidData, format!("{:?}", e)))?
        } else {
            slice
        };
        match chunk {
            Chunk::DontCare => {
                subslice.write_bytes(0);
                Ok(subslice.size() as usize)
            }
            Chunk::Raw(file_offset) => self
                .file
                .read_at_volatile(subslice, *file_offset + chunk_offset),
            Chunk::Fill(fill_bytes) => {
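                // The 4-byte fill pattern repeats across the chunk, so start the cycle at the
                // read offset's position within the pattern to keep unaligned reads consistent.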
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(subslice.size() as usize)
                    .collect();
                subslice.copy_from(&filled_memory);
                Ok(subslice.size() as usize)
            }
        }
    }
    fn write_at_volatile(&mut self, _slice: VolatileSlice, _offset: u64) -> io::Result<usize> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

// TODO(b/271381851): implement `try_clone`. It allows virtio-blk to run multiple workers.
impl DiskFile for AndroidSparse {}

/// An Android Sparse disk that implements `AsyncDisk` for access.
pub struct AsyncAndroidSparse {
    inner: IoSource<File>,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

impl ToAsyncDisk for AndroidSparse {
    fn to_async_disk(self: Box<Self>, ex: &Executor) -> DiskResult<Box<dyn AsyncDisk>> {
        Ok(Box::new(AsyncAndroidSparse {
            inner: ex.async_from(self.file).map_err(DiskError::ToAsync)?,
            total_size: self.total_size,
            chunks: self.chunks,
        }))
    }
}

impl DiskGetLen for AsyncAndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AsyncAndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl FileAllocate for AsyncAndroidSparse {
    fn allocate(&mut self, _offset: u64, _length: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

#[async_trait(?Send)]
impl AsyncDisk for AsyncAndroidSparse {
    fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
        Box::new(AndroidSparse {
            file: self.inner.into_source(),
            total_size: self.total_size,
            chunks: self.chunks,
        })
    }

    async fn fsync(&self) -> DiskResult<()> {
        // Do nothing because it's read-only.
        Ok(())
    }

    /// Reads data from `file_offset` to the end of the current chunk and writes it into memory
    /// `mem` at `mem_offsets`.
    async fn read_to_mem<'a>(
        &'a self,
        file_offset: u64,
        mem: Arc<dyn BackingMemory + Send + Sync>,
        mem_offsets: &'a [cros_async::MemRegion],
    ) -> DiskResult<usize> {
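        // Find the chunk containing `file_offset`, as in the synchronous `read_at_volatile`.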
        let found_chunk = self.chunks.range(..=file_offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or(DiskError::ReadingData(io::Error::new(
            ErrorKind::UnexpectedEof,
            format!("no chunk for offset {}", file_offset),
        )))?;
        let chunk_offset = file_offset - chunk_start;
        let chunk_size = *expanded_size;

        // Truncate `mem_offsets` to the remaining size of the current chunk.
        let mem_offsets =
            cros_async::MemRegion::truncate((chunk_size - chunk_offset) as usize, mem_offsets);
        let mem_size = mem_offsets.iter().map(|x| x.len).sum();
        match chunk {
            Chunk::DontCare => {
                for region in mem_offsets.iter() {
                    mem.get_volatile_slice(*region)
                        .map_err(DiskError::GuestMemory)?
                        .write_bytes(0);
                }
                Ok(mem_size)
            }
            Chunk::Raw(offset) => self
                .inner
                .read_to_mem(Some(offset + chunk_offset), mem, &mem_offsets)
                .await
                .map_err(DiskError::ReadToMem),
            Chunk::Fill(fill_bytes) => {
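                // Build the cycled fill pattern once, then copy the matching slice of it into
                // each destination region in turn.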
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(mem_size)
                    .collect();

                let mut filled_count = 0;
                for region in mem_offsets.iter() {
                    let buf = &filled_memory[filled_count..filled_count + region.len];
                    mem.get_volatile_slice(*region)
                        .map_err(DiskError::GuestMemory)?
                        .copy_from(buf);
                    filled_count += region.len;
                }
                Ok(mem_size)
            }
        }
    }

    async fn write_from_mem<'a>(
        &'a self,
        _file_offset: u64,
        _mem: Arc<dyn BackingMemory + Send + Sync>,
        _mem_offsets: &'a [cros_async::MemRegion],
    ) -> DiskResult<usize> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn punch_hole(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn write_zeroes_at(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use std::io::Write;

    use super::*;

    const CHUNK_SIZE: usize = mem::size_of::<ChunkHeader>();

    #[test]
    fn parse_raw() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_RAW.into(),
            reserved1: 0,
            chunk_sz: 1.into(),
            total_sz: (CHUNK_SIZE as u32 + 123).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[0u8; 123]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Raw(CHUNK_SIZE as u64),
            expanded_size: 123,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_dont_care() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_DONT_CARE.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_cursor = Cursor::new(header_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_fill() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_FILL.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Fill([123, 123, 123, 123]),
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_crc32() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_CRC32.into(),
            reserved1: 0,
            chunk_sz: 0.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123).expect("Failed to parse");
        assert_eq!(None, chunk);
    }

    fn test_image(chunks: Vec<ChunkWithSize>) -> AndroidSparse {
        let file = tempfile::tempfile().expect("failed to create tempfile");
        let size = chunks.iter().map(|x| x.expanded_size).sum();
        AndroidSparse::from_parts(file, size, chunks).expect("Could not create image")
    }

    #[test]
    fn read_dontcare() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 100];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [0u8; 100];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_simple() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 10, 20]),
            expanded_size: 8,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 10, 20, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_edges() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 30, 40]),
            expanded_size: 8,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 6];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 1)
            .expect("Could not read");
        let expected = [20, 30, 40, 10, 20, 30];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_offset_edges() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 20,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 100,
            },
        ];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 7];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 39)
            .expect("Could not read");
        let expected = [40, 10, 20, 30, 40, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_raw() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Raw(0),
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        write!(image.file, "hello").expect("Failed to write into internal file");
        let mut input_memory = [55u8; 5];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [104, 101, 108, 108, 111];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_two_fills() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 4,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([30, 40, 30, 40]),
                expanded_size: 4,
            },
        ];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 30, 40, 30, 40];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    // Tests for Async.
    use cros_async::MemRegion;
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    fn test_async_image(
        chunks: Vec<ChunkWithSize>,
        ex: &Executor,
    ) -> DiskResult<Box<dyn AsyncDisk>> {
        Box::new(test_image(chunks)).to_async_disk(ex)
    }

    /// Reads `len` bytes of data from `image` at `offset`.
    async fn read_exact_at(image: &dyn AsyncDisk, offset: usize, len: usize) -> Vec<u8> {
        let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
        // Fill in guest_mem with dirty data.
        guest_mem
            .write_all_at_addr(&vec![55u8; len], GuestAddress(0))
            .unwrap();

        let mut count = 0usize;
        while count < len {
            let result = image
                .read_to_mem(
                    (offset + count) as u64,
                    guest_mem.clone(),
                    &[MemRegion {
                        offset: count as u64,
                        len: len - count,
                    }],
                )
                .await;
            count += result.unwrap();
        }

        let mut buf = vec![0; len];
        guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
        buf
    }

    #[test]
    fn async_read_dontcare() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 100,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 100).await;
            assert!(buf.iter().all(|x| *x == 0));
        })
        .unwrap();
    }

    #[test]
    fn async_read_dontcare_with_offsets() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 10,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    &[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ],
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 0, 0, 0, 55, 55, 0, 0, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 10, 20, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    &[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ],
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 10, 20, 10, 55, 55, 20, 10, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 1, 6).await;
            let expected = [20, 30, 40, 10, 20, 30];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_offset_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::DontCare,
                    expanded_size: 20,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 30, 40]),
                    expanded_size: 100,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 39, 7).await;
            let expected = [40, 10, 20, 30, 40, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_raw() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            let buf = read_exact_at(&*async_image, 0, 5).await;
            let expected = [104, 101, 108, 108, 111];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_raw_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            // Prepare guest_mem with dirty data.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Pass multiple `MemRegion` to `read_to_mem`.
            async_image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    &[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ],
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 104, 101, 108, 55, 55, 108, 111, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_two_fills() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 10, 20]),
                    expanded_size: 4,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([30, 40, 30, 40]),
                    expanded_size: 4,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 30, 40, 30, 40];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }

    // Convert to sync and back again. There was once a bug where `into_inner` converted the
    // AndroidSparse into a raw file.
    //
    // Skip on windows because `into_source` isn't supported.
    #[cfg(not(windows))]
    #[test]
    fn async_roundtrip_read_dontcare() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 100,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let image = image.into_inner().to_async_disk(&ex).unwrap();
            let buf = read_exact_at(&*image, 0, 100).await;
            assert!(buf.iter().all(|x| *x == 0));
        })
        .unwrap();
    }
}