1 // Copyright 2019 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // https://android.googlesource.com/platform/system/core/+/7b444f0/libsparse/sparse_format.h
6
7 use std::collections::BTreeMap;
8 use std::fs::File;
9 use std::io::{self, ErrorKind, Read, Seek, SeekFrom};
10 use std::mem;
11
12 use crate::DiskGetLen;
13 use base::{
14 AsRawDescriptor, FileAllocate, FileReadWriteAtVolatile, FileSetLen, FileSync, PunchHole,
15 RawDescriptor, WriteZeroesAt,
16 };
17 use data_model::{DataInit, Le16, Le32, VolatileSlice};
18 use remain::sorted;
19 use thiserror::Error;
20
/// Errors produced while parsing an Android sparse image.
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    // NOTE(review): not constructed anywhere in this file — confirm it is
    // still used by callers before removing.
    #[error("invalid magic header for android sparse format")]
    InvalidMagicHeader,
    // The file was readable but its contents violate the sparse format spec.
    #[error("invalid specification: \"{0}\"")]
    InvalidSpecification(String),
    // An underlying I/O error occurred while reading headers or chunk data.
    #[error("failed to read specification: \"{0}\"")]
    ReadSpecificationError(io::Error),
}
31
pub type Result<T> = std::result::Result<T, Error>;

/// Magic number identifying an Android sparse image file header.
pub const SPARSE_HEADER_MAGIC: u32 = 0xed26ff3a;
/// Only images with this major version are accepted (see `from_file`).
const MAJOR_VERSION: u16 = 1;
36
/// On-disk layout of the sparse image file header (28 bytes in revision 1).
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct SparseHeader {
    magic: Le32,          /* SPARSE_HEADER_MAGIC */
    major_version: Le16,  /* (0x1) - reject images with higher major versions */
    minor_version: Le16,  /* (0x0) - allow images with higher minor versions */
    file_hdr_sz: Le16,    /* 28 bytes for first revision of the file format */
    chunk_hdr_size: Le16, /* 12 bytes for first revision of the file format */
    blk_sz: Le32,         /* block size in bytes, must be a multiple of 4 (4096) */
    total_blks: Le32,     /* total blocks in the non-sparse output image */
    total_chunks: Le32,   /* total chunks in the sparse input image */
    image_checksum: Le32, /* CRC32 checksum of the original data, counting "don't care" */
                          /* as 0. Standard 802.3 polynomial, use a Public Domain */
                          /* table implementation */
}
52
// SAFETY: SparseHeader is repr(C) and composed entirely of Le16/Le32 fields
// (28 bytes, no padding), so any byte pattern is a valid value.
unsafe impl DataInit for SparseHeader {}
54
// Chunk type discriminants defined by the sparse image format.
const CHUNK_TYPE_RAW: u16 = 0xCAC1;
const CHUNK_TYPE_FILL: u16 = 0xCAC2;
const CHUNK_TYPE_DONT_CARE: u16 = 0xCAC3;
const CHUNK_TYPE_CRC32: u16 = 0xCAC4;
59
/// On-disk layout of a chunk header (12 bytes in revision 1).
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct ChunkHeader {
    chunk_type: Le16, /* 0xCAC1 -> raw; 0xCAC2 -> fill; 0xCAC3 -> don't care */
    reserved1: u16,
    chunk_sz: Le32,  /* in blocks in output image */
    total_sz: Le32,  /* in bytes of chunk input file including chunk header and data */
}
68
// SAFETY: ChunkHeader is repr(C) and composed of u16/Le16/Le32 fields
// (12 bytes, no padding), so any byte pattern is a valid value.
unsafe impl DataInit for ChunkHeader {}
70
/// A parsed chunk: fill patterns are owned inline, raw data is referenced by
/// its offset in the backing file.
#[derive(Clone, Debug, PartialEq, Eq)]
enum Chunk {
    Raw(u64), // Offset into the file where the raw data starts
    Fill(Vec<u8>), // Byte pattern repeated to fill the chunk's expanded size
    DontCare, // Reads as zeroes
}
77
/// A chunk paired with the number of bytes it occupies in the expanded image
/// (chunk_sz blocks * block size).
#[derive(Clone, Debug, PartialEq, Eq)]
struct ChunkWithSize {
    chunk: Chunk,
    expanded_size: u64,
}
83
84 /* Following a Raw or Fill or CRC32 chunk is data.
85 * For a Raw chunk, it's the data in chunk_sz * blk_sz.
86 * For a Fill chunk, it's 4 bytes of the fill data.
87 * For a CRC32 chunk, it's 4 bytes of CRC32
88 */
/// A read-only disk image backed by an Android sparse file.
#[derive(Debug)]
pub struct AndroidSparse {
    file: File, // Underlying sparse-format file; raw chunk data is read from it on demand
    total_size: u64, // Size in bytes of the expanded (non-sparse) image
    chunks: BTreeMap<u64, ChunkWithSize>, // Keyed by the chunk's offset in the expanded image
}
95
parse_chunk<T: Read + Seek>( mut input: &mut T, chunk_hdr_size: u64, blk_sz: u64, ) -> Result<Option<ChunkWithSize>>96 fn parse_chunk<T: Read + Seek>(
97 mut input: &mut T,
98 chunk_hdr_size: u64,
99 blk_sz: u64,
100 ) -> Result<Option<ChunkWithSize>> {
101 let current_offset = input
102 .seek(SeekFrom::Current(0))
103 .map_err(Error::ReadSpecificationError)?;
104 let chunk_header =
105 ChunkHeader::from_reader(&mut input).map_err(Error::ReadSpecificationError)?;
106 let chunk = match chunk_header.chunk_type.to_native() {
107 CHUNK_TYPE_RAW => {
108 input
109 .seek(SeekFrom::Current(
110 chunk_header.total_sz.to_native() as i64 - chunk_hdr_size as i64,
111 ))
112 .map_err(Error::ReadSpecificationError)?;
113 Chunk::Raw(current_offset + chunk_hdr_size as u64)
114 }
115 CHUNK_TYPE_FILL => {
116 if chunk_header.total_sz == chunk_hdr_size as u32 {
117 return Err(Error::InvalidSpecification(
118 "Fill chunk did not have any data to fill".to_string(),
119 ));
120 }
121 let fill_size = chunk_header.total_sz.to_native() as u64 - chunk_hdr_size as u64;
122 let mut fill_bytes = vec![0u8; fill_size as usize];
123 input
124 .read_exact(&mut fill_bytes)
125 .map_err(Error::ReadSpecificationError)?;
126 Chunk::Fill(fill_bytes)
127 }
128 CHUNK_TYPE_DONT_CARE => Chunk::DontCare,
129 CHUNK_TYPE_CRC32 => return Ok(None), // TODO(schuffelen): Validate crc32s in input
130 unknown_type => {
131 return Err(Error::InvalidSpecification(format!(
132 "Chunk had invalid type, was {:x}",
133 unknown_type
134 )))
135 }
136 };
137 let expanded_size = chunk_header.chunk_sz.to_native() as u64 * blk_sz;
138 Ok(Some(ChunkWithSize {
139 chunk,
140 expanded_size,
141 }))
142 }
143
144 impl AndroidSparse {
from_file(mut file: File) -> Result<AndroidSparse>145 pub fn from_file(mut file: File) -> Result<AndroidSparse> {
146 file.seek(SeekFrom::Start(0))
147 .map_err(Error::ReadSpecificationError)?;
148 let sparse_header =
149 SparseHeader::from_reader(&mut file).map_err(Error::ReadSpecificationError)?;
150 if sparse_header.magic != SPARSE_HEADER_MAGIC {
151 return Err(Error::InvalidSpecification(format!(
152 "Header did not match magic constant. Expected {:x}, was {:x}",
153 SPARSE_HEADER_MAGIC,
154 sparse_header.magic.to_native()
155 )));
156 } else if sparse_header.major_version != MAJOR_VERSION {
157 return Err(Error::InvalidSpecification(format!(
158 "Header major version did not match. Expected {}, was {}",
159 MAJOR_VERSION,
160 sparse_header.major_version.to_native(),
161 )));
162 } else if (sparse_header.chunk_hdr_size.to_native() as usize)
163 < mem::size_of::<ChunkHeader>()
164 {
165 return Err(Error::InvalidSpecification(format!(
166 "Chunk header size does not fit chunk header struct, expected >={}, was {}",
167 sparse_header.chunk_hdr_size.to_native(),
168 mem::size_of::<ChunkHeader>()
169 )));
170 }
171 let header_size = sparse_header.chunk_hdr_size.to_native() as u64;
172 let block_size = sparse_header.blk_sz.to_native() as u64;
173 let chunks = (0..sparse_header.total_chunks.to_native())
174 .filter_map(|_| parse_chunk(&mut file, header_size, block_size).transpose())
175 .collect::<Result<Vec<ChunkWithSize>>>()?;
176 let total_size =
177 sparse_header.total_blks.to_native() as u64 * sparse_header.blk_sz.to_native() as u64;
178 AndroidSparse::from_parts(file, total_size, chunks)
179 }
180
from_parts(file: File, size: u64, chunks: Vec<ChunkWithSize>) -> Result<AndroidSparse>181 fn from_parts(file: File, size: u64, chunks: Vec<ChunkWithSize>) -> Result<AndroidSparse> {
182 let mut chunks_map: BTreeMap<u64, ChunkWithSize> = BTreeMap::new();
183 let mut expanded_location: u64 = 0;
184 for chunk_with_size in chunks {
185 let size = chunk_with_size.expanded_size;
186 if chunks_map
187 .insert(expanded_location, chunk_with_size)
188 .is_some()
189 {
190 return Err(Error::InvalidSpecification(format!(
191 "Two chunks were at {}",
192 expanded_location
193 )));
194 }
195 expanded_location += size;
196 }
197 let image = AndroidSparse {
198 file,
199 total_size: size,
200 chunks: chunks_map,
201 };
202 let calculated_len = image.get_len().map_err(Error::ReadSpecificationError)?;
203 if calculated_len != size {
204 return Err(Error::InvalidSpecification(format!(
205 "Header promised size {}, chunks added up to {}",
206 size, calculated_len
207 )));
208 }
209 Ok(image)
210 }
211 }
212
impl DiskGetLen for AndroidSparse {
    // Length of the expanded (non-sparse) image, as computed from the header.
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}
218
219 impl FileSetLen for AndroidSparse {
set_len(&self, _len: u64) -> io::Result<()>220 fn set_len(&self, _len: u64) -> io::Result<()> {
221 Err(io::Error::new(
222 ErrorKind::PermissionDenied,
223 "unsupported operation",
224 ))
225 }
226 }
227
impl FileSync for AndroidSparse {
    // The image is never written through this interface, so there is nothing
    // to flush.
    fn fsync(&mut self) -> io::Result<()> {
        Ok(())
    }
}
233
234 impl PunchHole for AndroidSparse {
punch_hole(&mut self, _offset: u64, _length: u64) -> io::Result<()>235 fn punch_hole(&mut self, _offset: u64, _length: u64) -> io::Result<()> {
236 Err(io::Error::new(
237 ErrorKind::PermissionDenied,
238 "unsupported operation",
239 ))
240 }
241 }
242
243 impl WriteZeroesAt for AndroidSparse {
write_zeroes_at(&mut self, _offset: u64, _length: usize) -> io::Result<usize>244 fn write_zeroes_at(&mut self, _offset: u64, _length: usize) -> io::Result<usize> {
245 Err(io::Error::new(
246 ErrorKind::PermissionDenied,
247 "unsupported operation",
248 ))
249 }
250 }
251
impl AsRawDescriptor for AndroidSparse {
    // Expose the backing file's descriptor.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.file.as_raw_descriptor()
    }
}
257
258 impl FileAllocate for AndroidSparse {
allocate(&mut self, _offset: u64, _length: u64) -> io::Result<()>259 fn allocate(&mut self, _offset: u64, _length: u64) -> io::Result<()> {
260 Err(io::Error::new(
261 ErrorKind::PermissionDenied,
262 "unsupported operation",
263 ))
264 }
265 }
266
267 // Performs reads up to the chunk boundary.
268 impl FileReadWriteAtVolatile for AndroidSparse {
read_at_volatile(&mut self, slice: VolatileSlice, offset: u64) -> io::Result<usize>269 fn read_at_volatile(&mut self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
270 let found_chunk = self.chunks.range(..=offset).next_back();
271 let (
272 chunk_start,
273 ChunkWithSize {
274 chunk,
275 expanded_size,
276 },
277 ) = found_chunk.ok_or_else(|| {
278 io::Error::new(
279 ErrorKind::UnexpectedEof,
280 format!("no chunk for offset {}", offset),
281 )
282 })?;
283 let chunk_offset = offset - chunk_start;
284 let chunk_size = *expanded_size;
285 let subslice = if chunk_offset + (slice.size() as u64) > chunk_size {
286 slice
287 .sub_slice(0, (chunk_size - chunk_offset) as usize)
288 .map_err(|e| io::Error::new(ErrorKind::InvalidData, format!("{:?}", e)))?
289 } else {
290 slice
291 };
292 match chunk {
293 Chunk::DontCare => {
294 subslice.write_bytes(0);
295 Ok(subslice.size() as usize)
296 }
297 Chunk::Raw(file_offset) => self
298 .file
299 .read_at_volatile(subslice, *file_offset + chunk_offset),
300 Chunk::Fill(fill_bytes) => {
301 let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
302 let filled_memory: Vec<u8> = fill_bytes
303 .iter()
304 .cloned()
305 .cycle()
306 .skip(chunk_offset_mod as usize)
307 .take(subslice.size() as usize)
308 .collect();
309 subslice.copy_from(&filled_memory);
310 Ok(subslice.size() as usize)
311 }
312 }
313 }
write_at_volatile(&mut self, _slice: VolatileSlice, _offset: u64) -> io::Result<usize>314 fn write_at_volatile(&mut self, _slice: VolatileSlice, _offset: u64) -> io::Result<usize> {
315 Err(io::Error::new(
316 ErrorKind::PermissionDenied,
317 "unsupported operation",
318 ))
319 }
320 }
321
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::{Cursor, Write};
    use tempfile::tempfile;

    // Size in bytes of the in-memory ChunkHeader struct.
    const CHUNK_SIZE: usize = mem::size_of::<ChunkHeader>();

    // A raw chunk records the data's offset (just past the header) and skips it.
    #[test]
    fn parse_raw() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_RAW.into(),
            reserved1: 0,
            chunk_sz: 1.into(),
            total_sz: (CHUNK_SIZE as u32 + 123).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[0u8; 123]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Raw(CHUNK_SIZE as u64),
            expanded_size: 123,
        };
        assert_eq!(expected_chunk, chunk);
    }

    // A don't-care chunk has no payload; expanded size is chunk_sz * blk_sz.
    #[test]
    fn parse_dont_care() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_DONT_CARE.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_cursor = Cursor::new(header_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    // A fill chunk's payload (the bytes after the header) becomes the pattern.
    #[test]
    fn parse_fill() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_FILL.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Fill(vec![123, 123, 123, 123]),
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    // CRC32 chunks carry no image data and parse to None.
    #[test]
    fn parse_crc32() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_CRC32.into(),
            reserved1: 0,
            chunk_sz: 0.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_slice();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk =
            parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123).expect("Failed to parse");
        assert_eq!(None, chunk);
    }

    // Builds an AndroidSparse directly from a chunk list, sizing it to match,
    // so read tests can bypass the file-format parsing.
    fn test_image(chunks: Vec<ChunkWithSize>) -> AndroidSparse {
        let file = tempfile().expect("failed to create tempfile");
        let size = chunks.iter().map(|x| x.expanded_size).sum();
        AndroidSparse::from_parts(file, size, chunks).expect("Could not create image")
    }

    // Don't-care chunks overwrite the destination with zeroes.
    #[test]
    fn read_dontcare() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 100];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [0u8; 100];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    // A fill pattern repeats across the chunk from offset 0.
    #[test]
    fn read_fill_simple() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill(vec![10, 20]),
            expanded_size: 8,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 10, 20, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    // A read starting mid-pattern picks up the pattern at the right phase.
    #[test]
    fn read_fill_edges() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill(vec![10, 20, 30]),
            expanded_size: 8,
        }];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 6];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 1)
            .expect("Could not read");
        let expected = [20, 30, 10, 20, 30, 10];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    // The fill pattern's phase is relative to the chunk start, not the image
    // start: offset 39 is 19 bytes into the fill chunk, so the read starts at
    // pattern index 19 % 3 == 1.
    #[test]
    fn read_fill_offset_edges() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 20,
            },
            ChunkWithSize {
                chunk: Chunk::Fill(vec![10, 20, 30]),
                expanded_size: 100,
            },
        ];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 7];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 39)
            .expect("Could not read");
        let expected = [20, 30, 10, 20, 30, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    // Raw chunks read straight from the backing file at the recorded offset.
    #[test]
    fn read_raw() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Raw(0),
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        write!(image.file, "hello").expect("Failed to write into internal file");
        let mut input_memory = [55u8; 5];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [104, 101, 108, 108, 111]; // ASCII "hello"
        assert_eq!(&expected[..], &input_memory[..]);
    }

    // A read spanning a chunk boundary is satisfied by successive reads from
    // both chunks (read_exact_at_volatile loops over the partial reads).
    #[test]
    fn read_two_fills() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::Fill(vec![10, 20]),
                expanded_size: 4,
            },
            ChunkWithSize {
                chunk: Chunk::Fill(vec![30, 40]),
                expanded_size: 4,
            },
        ];
        let mut image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 30, 40, 30, 40];
        assert_eq!(&expected[..], &input_memory[..]);
    }
}
523