1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::cmp::max;
6 use std::cmp::min;
7 use std::collections::HashSet;
8 use std::convert::TryInto;
9 use std::fs::File;
10 use std::fs::OpenOptions;
11 use std::io;
12 use std::io::ErrorKind;
13 use std::io::Read;
14 use std::io::Seek;
15 use std::io::SeekFrom;
16 use std::io::Write;
17 use std::ops::Range;
18 use std::path::Path;
19 use std::path::PathBuf;
20 use std::sync::atomic::AtomicBool;
21 use std::sync::atomic::Ordering;
22 use std::sync::Arc;
23
24 use async_trait::async_trait;
25 use base::AsRawDescriptors;
26 use base::FileAllocate;
27 use base::FileReadWriteAtVolatile;
28 use base::FileSetLen;
29 use base::RawDescriptor;
30 use base::VolatileSlice;
31 use crc32fast::Hasher;
32 use cros_async::BackingMemory;
33 use cros_async::Executor;
34 use cros_async::MemRegionIter;
35 use protobuf::Message;
36 use protos::cdisk_spec;
37 use protos::cdisk_spec::ComponentDisk;
38 use protos::cdisk_spec::CompositeDisk;
39 use protos::cdisk_spec::ReadWriteCapability;
40 use remain::sorted;
41 use thiserror::Error;
42 use uuid::Uuid;
43
44 use crate::gpt;
45 use crate::gpt::write_gpt_header;
46 use crate::gpt::write_protective_mbr;
47 use crate::gpt::GptPartitionEntry;
48 use crate::gpt::GPT_BEGINNING_SIZE;
49 use crate::gpt::GPT_END_SIZE;
50 use crate::gpt::GPT_HEADER_SIZE;
51 use crate::gpt::GPT_NUM_PARTITIONS;
52 use crate::gpt::GPT_PARTITION_ENTRY_SIZE;
53 use crate::gpt::SECTOR_SIZE;
54 use crate::open_disk_file;
55 use crate::AsyncDisk;
56 use crate::DiskFile;
57 use crate::DiskFileParams;
58 use crate::DiskGetLen;
59 use crate::ImageType;
60 use crate::ToAsyncDisk;
61
62 /// The amount of padding needed between the last partition entry and the first partition, to align
63 /// the partition appropriately. The two sectors are for the MBR and the GPT header.
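/// Assuming the standard GPT layout used by the `gpt` module (512-byte sectors, with
/// GPT_NUM_PARTITIONS entries of GPT_PARTITION_ENTRY_SIZE bytes each), this works out to
/// 0x5000 - 0x400 - 0x4000 = 0xc00 bytes.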
64 const PARTITION_ALIGNMENT_SIZE: usize = GPT_BEGINNING_SIZE as usize
65 - 2 * SECTOR_SIZE as usize
66 - GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize;
67 const HEADER_PADDING_LENGTH: usize = SECTOR_SIZE as usize - GPT_HEADER_SIZE as usize;
68 // Keep all partitions 4k aligned for performance.
69 const PARTITION_SIZE_SHIFT: u8 = 12;
70
71 // From https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs.
72 const LINUX_FILESYSTEM_GUID: Uuid = Uuid::from_u128(0x0FC63DAF_8483_4772_8E79_3D69D8477DE4);
73 const EFI_SYSTEM_PARTITION_GUID: Uuid = Uuid::from_u128(0xC12A7328_F81F_11D2_BA4B_00A0C93EC93B);
74
75 #[sorted]
76 #[derive(Error, Debug)]
77 pub enum Error {
78 #[error("failed to use underlying disk: \"{0}\"")]
79 DiskError(Box<crate::Error>),
80 #[error("duplicate GPT partition label \"{0}\"")]
81 DuplicatePartitionLabel(String),
82 #[error("failed to write GPT header: \"{0}\"")]
83 GptError(gpt::Error),
84 #[error("invalid magic header for composite disk format")]
85 InvalidMagicHeader,
86 #[error("invalid partition path {0:?}")]
87 InvalidPath(PathBuf),
88 #[error("failed to parse specification proto: \"{0}\"")]
89 InvalidProto(protobuf::Error),
90 #[error("invalid specification: \"{0}\"")]
91 InvalidSpecification(String),
92 #[error("no image files for partition {0:?}")]
93 NoImageFiles(PartitionInfo),
94 #[error("failed to open component file \"{1}\": \"{0}\"")]
95 OpenFile(io::Error, String),
96 #[error("failed to read specification: \"{0}\"")]
97 ReadSpecificationError(io::Error),
98 #[error("Read-write partition {0:?} size is not a multiple of {multiple}.", multiple = 1 << PARTITION_SIZE_SHIFT)]
99 UnalignedReadWrite(PartitionInfo),
100 #[error("unknown version {0} in specification")]
101 UnknownVersion(u64),
102 #[error("unsupported component disk type \"{0:?}\"")]
103 UnsupportedComponent(ImageType),
104 #[error("failed to write composite disk header: \"{0}\"")]
105 WriteHeader(io::Error),
106 #[error("failed to write specification proto: \"{0}\"")]
107 WriteProto(protobuf::Error),
108 #[error("failed to write zero filler: \"{0}\"")]
109 WriteZeroFiller(io::Error),
110 }
111
112 impl From<gpt::Error> for Error {
113 fn from(e: gpt::Error) -> Self {
114 Self::GptError(e)
115 }
116 }
117
118 pub type Result<T> = std::result::Result<T, Error>;
119
120 #[derive(Debug)]
121 struct ComponentDiskPart {
122 file: Box<dyn DiskFile>,
123 offset: u64,
124 length: u64,
125 // Whether there have been any writes since the last fsync or fdatasync.
126 needs_flush: AtomicBool,
127 }
128
129 impl ComponentDiskPart {
130 fn range(&self) -> Range<u64> {
131 self.offset..(self.offset + self.length)
132 }
133 }
134
135 /// Represents a composite virtual disk made out of multiple component files. This is described on
136 /// disk by a protocol buffer file that lists out the component file locations and their offsets
137 /// and lengths on the virtual disk. The spaces covered by the component disks must be contiguous
138 /// and not overlapping.
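///
/// For illustration only (not a requirement of the format beyond what is stated above): a
/// typical composite disk built by `create_composite_disk` consists of a read-only GPT header
/// component at offset 0, one component per partition image (with read-only gaps backed by the
/// zero filler), and a read-only GPT footer component at the end of the disk.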
139 #[derive(Debug)]
140 pub struct CompositeDiskFile {
141 component_disks: Vec<ComponentDiskPart>,
142 // We keep the root composite file open so that the file lock is not dropped.
143 _disk_spec_file: File,
144 }
145
146 // TODO(b/271381851): implement `try_clone`. It allows virtio-blk to run multiple workers.
147 impl DiskFile for CompositeDiskFile {}
148
149 fn ranges_overlap(a: &Range<u64>, b: &Range<u64>) -> bool {
150 range_intersection(a, b).is_some()
151 }
152
153 fn range_intersection(a: &Range<u64>, b: &Range<u64>) -> Option<Range<u64>> {
154 let r = Range {
155 start: max(a.start, b.start),
156 end: min(a.end, b.end),
157 };
158 if r.is_empty() {
159 None
160 } else {
161 Some(r)
162 }
163 }
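// Example behavior of the helpers above (illustrative): range_intersection(&(0..10), &(5..15))
// returns Some(5..10), while ranges_overlap(&(0..10), &(10..20)) is false because the ranges
// only touch at 10.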
164
165 /// The version of the composite disk format supported by this implementation.
166 const COMPOSITE_DISK_VERSION: u64 = 2;
167
168 /// A magic string placed at the beginning of a composite disk file to identify it.
169 pub const CDISK_MAGIC: &str = "composite_disk\x1d";
170
171 impl CompositeDiskFile {
172 fn new(mut disks: Vec<ComponentDiskPart>, disk_spec_file: File) -> Result<CompositeDiskFile> {
173 disks.sort_by(|d1, d2| d1.offset.cmp(&d2.offset));
174 for s in disks.windows(2) {
175 if s[0].offset == s[1].offset {
176 return Err(Error::InvalidSpecification(format!(
177 "Two disks at offset {}",
178 s[0].offset
179 )));
180 }
181 }
182 Ok(CompositeDiskFile {
183 component_disks: disks,
184 _disk_spec_file: disk_spec_file,
185 })
186 }
187
188 /// Set up a composite disk by reading the specification from a file. The file must consist of
189 /// the CDISK_MAGIC string followed by one binary instance of the CompositeDisk protocol
190 /// buffer. Returns an error if it could not read the file or if the specification was invalid.
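///
/// A sketch of the expected spec-file layout (illustrative): the first 15 bytes are the
/// CDISK_MAGIC string, immediately followed by the serialized CompositeDisk message; no
/// padding or length prefix separates the two.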
191 pub fn from_file(mut file: File, params: DiskFileParams) -> Result<CompositeDiskFile> {
192 file.seek(SeekFrom::Start(0))
193 .map_err(Error::ReadSpecificationError)?;
194 let mut magic_space = [0u8; CDISK_MAGIC.len()];
195 file.read_exact(&mut magic_space[..])
196 .map_err(Error::ReadSpecificationError)?;
197 if magic_space != CDISK_MAGIC.as_bytes() {
198 return Err(Error::InvalidMagicHeader);
199 }
200 let proto: cdisk_spec::CompositeDisk =
201 Message::parse_from_reader(&mut file).map_err(Error::InvalidProto)?;
202 if proto.version > COMPOSITE_DISK_VERSION {
203 return Err(Error::UnknownVersion(proto.version));
204 }
205 let mut disks: Vec<ComponentDiskPart> = proto
206 .component_disks
207 .iter()
208 .map(|disk| {
209 let writable = !params.is_read_only
210 && disk.read_write_capability
211 == cdisk_spec::ReadWriteCapability::READ_WRITE.into();
212 let component_path = PathBuf::from(&disk.file_path);
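// Relative component paths are resolved against the directory containing the spec file;
// joining an absolute path leaves it unchanged, so absolute paths are used as-is.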
213 let path = if component_path.is_relative() || proto.version > 1 {
214 params.path.parent().unwrap().join(component_path)
215 } else {
216 component_path
217 };
218
219 // Note that read-only parts of a composite disk should NOT be marked sparse,
220 // as the action of marking them sparse is a write. This may seem a little hacky,
221 // and it is; however:
222 // (a) there is not a good way to pass sparseness parameters per composite disk
223 // part (the proto does not have fields for it).
224 // (b) this override of sorts always matches the correct user intent.
225 Ok(ComponentDiskPart {
226 file: open_disk_file(DiskFileParams {
227 path: path.to_owned(),
228 is_read_only: !writable,
229 is_sparse_file: params.is_sparse_file && writable,
230 // TODO: Should pass `params.is_overlapped` through here. Needs testing.
231 is_overlapped: false,
232 is_direct: params.is_direct,
233 lock: params.lock,
234 depth: params.depth + 1,
235 })
236 .map_err(|e| Error::DiskError(Box::new(e)))?,
237 offset: disk.offset,
238 length: 0, // Assigned later
239 needs_flush: AtomicBool::new(false),
240 })
241 })
242 .collect::<Result<Vec<ComponentDiskPart>>>()?;
243 disks.sort_by(|d1, d2| d1.offset.cmp(&d2.offset));
244 for i in 0..(disks.len() - 1) {
245 let length = disks[i + 1].offset - disks[i].offset;
246 if length == 0 {
247 let text = format!("Two disks at offset {}", disks[i].offset);
248 return Err(Error::InvalidSpecification(text));
249 }
250 if let Some(disk) = disks.get_mut(i) {
251 disk.length = length;
252 } else {
253 let text = format!("Unable to set disk length {}", length);
254 return Err(Error::InvalidSpecification(text));
255 }
256 }
257 if let Some(last_disk) = disks.last_mut() {
258 if proto.length <= last_disk.offset {
259 let text = format!(
260 "Full size of disk doesn't match last offset. {} <= {}",
261 proto.length, last_disk.offset
262 );
263 return Err(Error::InvalidSpecification(text));
264 }
265 last_disk.length = proto.length - last_disk.offset;
266 } else {
267 let text = format!("Unable to set last disk length to end at {}", proto.length);
268 return Err(Error::InvalidSpecification(text));
269 }
270
271 CompositeDiskFile::new(disks, file)
272 }
273
274 fn length(&self) -> u64 {
275 if let Some(disk) = self.component_disks.last() {
276 disk.offset + disk.length
277 } else {
278 0
279 }
280 }
281
282 fn disk_at_offset(&self, offset: u64) -> io::Result<&ComponentDiskPart> {
283 self.component_disks
284 .iter()
285 .find(|disk| disk.range().contains(&offset))
286 .ok_or(io::Error::new(
287 ErrorKind::InvalidData,
288 format!("no disk at offset {}", offset),
289 ))
290 }
291 }
292
293 impl DiskGetLen for CompositeDiskFile {
294 fn get_len(&self) -> io::Result<u64> {
295 Ok(self.length())
296 }
297 }
298
299 impl FileSetLen for CompositeDiskFile {
300 fn set_len(&self, _len: u64) -> io::Result<()> {
301 Err(io::Error::new(ErrorKind::Other, "unsupported operation"))
302 }
303 }
304
305 // Implements Read and Write targeting volatile storage for composite disks.
306 //
307 // Note that reads and writes will return early if crossing component disk boundaries.
308 // This is allowed by the read and write specifications, which only say read and write
309 // have to return how many bytes were actually read or written. Use read_exact_volatile
310 // or write_all_volatile to make sure all bytes are received/transmitted.
311 //
312 // If one of the component disks does a partial read or write, that also gets passed
313 // transparently to the parent.
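// For example (illustrative): with one component covering offsets [0, 100) and another covering
// [100, 200), a 10-byte read at offset 95 returns only the 5 bytes served by the first
// component; callers that need all 10 bytes should use read_exact_volatile.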
314 impl FileReadWriteAtVolatile for CompositeDiskFile {
315 fn read_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
316 let cursor_location = offset;
317 let disk = self.disk_at_offset(cursor_location)?;
318 let subslice = if cursor_location + slice.size() as u64 > disk.offset + disk.length {
319 let new_size = disk.offset + disk.length - cursor_location;
320 slice
321 .sub_slice(0, new_size as usize)
322 .map_err(|e| io::Error::new(ErrorKind::InvalidData, e.to_string()))?
323 } else {
324 slice
325 };
326 disk.file
327 .read_at_volatile(subslice, cursor_location - disk.offset)
328 }
329 fn write_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
330 let cursor_location = offset;
331 let disk = self.disk_at_offset(cursor_location)?;
332 let subslice = if cursor_location + slice.size() as u64 > disk.offset + disk.length {
333 let new_size = disk.offset + disk.length - cursor_location;
334 slice
335 .sub_slice(0, new_size as usize)
336 .map_err(|e| io::Error::new(ErrorKind::InvalidData, e.to_string()))?
337 } else {
338 slice
339 };
340
341 let bytes = disk
342 .file
343 .write_at_volatile(subslice, cursor_location - disk.offset)?;
344 disk.needs_flush.store(true, Ordering::SeqCst);
345 Ok(bytes)
346 }
347 }
348
349 impl AsRawDescriptors for CompositeDiskFile {
350 fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
351 self.component_disks
352 .iter()
353 .flat_map(|d| d.file.as_raw_descriptors())
354 .collect()
355 }
356 }
357
358 struct AsyncComponentDiskPart {
359 file: Box<dyn AsyncDisk>,
360 offset: u64,
361 length: u64,
362 needs_flush: AtomicBool,
363 }
364
365 pub struct AsyncCompositeDiskFile {
366 component_disks: Vec<AsyncComponentDiskPart>,
367 }
368
369 impl DiskGetLen for AsyncCompositeDiskFile {
370 fn get_len(&self) -> io::Result<u64> {
371 Ok(self.length())
372 }
373 }
374
375 impl FileSetLen for AsyncCompositeDiskFile {
376 fn set_len(&self, _len: u64) -> io::Result<()> {
377 Err(io::Error::new(ErrorKind::Other, "unsupported operation"))
378 }
379 }
380
381 impl FileAllocate for AsyncCompositeDiskFile {
382 fn allocate(&self, offset: u64, length: u64) -> io::Result<()> {
383 let range = offset..(offset + length);
384 let disks = self
385 .component_disks
386 .iter()
387 .filter(|disk| ranges_overlap(&disk.range(), &range));
388 for disk in disks {
389 if let Some(intersection) = range_intersection(&range, &disk.range()) {
390 disk.file.allocate(
391 intersection.start - disk.offset,
392 intersection.end - intersection.start,
393 )?;
394 disk.needs_flush.store(true, Ordering::SeqCst);
395 }
396 }
397 Ok(())
398 }
399 }
400
401 impl ToAsyncDisk for CompositeDiskFile {
402 fn to_async_disk(self: Box<Self>, ex: &Executor) -> crate::Result<Box<dyn AsyncDisk>> {
403 Ok(Box::new(AsyncCompositeDiskFile {
404 component_disks: self
405 .component_disks
406 .into_iter()
407 .map(|disk| -> crate::Result<_> {
408 Ok(AsyncComponentDiskPart {
409 file: disk.file.to_async_disk(ex)?,
410 offset: disk.offset,
411 length: disk.length,
412 needs_flush: disk.needs_flush,
413 })
414 })
415 .collect::<crate::Result<Vec<_>>>()?,
416 }))
417 }
418 }
419
420 impl AsyncComponentDiskPart {
421 fn range(&self) -> Range<u64> {
422 self.offset..(self.offset + self.length)
423 }
424
425 fn set_needs_flush(&self) {
426 self.needs_flush.store(true, Ordering::SeqCst);
427 }
428 }
429
430 impl AsyncCompositeDiskFile {
431 fn length(&self) -> u64 {
432 if let Some(disk) = self.component_disks.last() {
433 disk.offset + disk.length
434 } else {
435 0
436 }
437 }
438
439 fn disk_at_offset(&self, offset: u64) -> io::Result<&AsyncComponentDiskPart> {
440 self.component_disks
441 .iter()
442 .find(|disk| disk.range().contains(&offset))
443 .ok_or(io::Error::new(
444 ErrorKind::InvalidData,
445 format!("no disk at offset {}", offset),
446 ))
447 }
448
449 fn disks_in_range<'a>(&'a self, range: &Range<u64>) -> Vec<&'a AsyncComponentDiskPart> {
450 self.component_disks
451 .iter()
452 .filter(|disk| ranges_overlap(&disk.range(), range))
453 .collect()
454 }
455 }
456
457 #[async_trait(?Send)]
458 impl AsyncDisk for AsyncCompositeDiskFile {
459 async fn flush(&self) -> crate::Result<()> {
460 futures::future::try_join_all(self.component_disks.iter().map(|c| c.file.flush())).await?;
461 Ok(())
462 }
463
464 async fn fsync(&self) -> crate::Result<()> {
465 // NOTE: The fsync implementation isn't really async, so no point in adding concurrency
466 // here unless we introduce a blocking threadpool.
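// `fetch_and(false)` atomically clears `needs_flush` and returns its previous value, so a
// component is only synced if it was written since the last successful sync; on failure the
// flag is restored below so that a later fsync retries.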
467 for disk in self.component_disks.iter() {
468 if disk.needs_flush.fetch_and(false, Ordering::SeqCst) {
469 if let Err(e) = disk.file.fsync().await {
470 disk.set_needs_flush();
471 return Err(e);
472 }
473 }
474 }
475 Ok(())
476 }
477
478 async fn fdatasync(&self) -> crate::Result<()> {
479 // NOTE: The fdatasync implementation isn't really async, so no point in adding concurrency
480 // here unless we introduce a blocking threadpool.
481 for disk in self.component_disks.iter() {
482 if disk.needs_flush.fetch_and(false, Ordering::SeqCst) {
483 if let Err(e) = disk.file.fdatasync().await {
484 disk.set_needs_flush();
485 return Err(e);
486 }
487 }
488 }
489 Ok(())
490 }
491
492 async fn read_to_mem<'a>(
493 &'a self,
494 file_offset: u64,
495 mem: Arc<dyn BackingMemory + Send + Sync>,
496 mem_offsets: MemRegionIter<'a>,
497 ) -> crate::Result<usize> {
498 let disk = self
499 .disk_at_offset(file_offset)
500 .map_err(crate::Error::ReadingData)?;
501 let remaining_disk = disk.offset + disk.length - file_offset;
502 disk.file
503 .read_to_mem(
504 file_offset - disk.offset,
505 mem,
506 mem_offsets.take_bytes(remaining_disk.try_into().unwrap()),
507 )
508 .await
509 }
510
511 async fn write_from_mem<'a>(
512 &'a self,
513 file_offset: u64,
514 mem: Arc<dyn BackingMemory + Send + Sync>,
515 mem_offsets: MemRegionIter<'a>,
516 ) -> crate::Result<usize> {
517 let disk = self
518 .disk_at_offset(file_offset)
519 .map_err(crate::Error::ReadingData)?;
520 let remaining_disk = disk.offset + disk.length - file_offset;
521 let n = disk
522 .file
523 .write_from_mem(
524 file_offset - disk.offset,
525 mem,
526 mem_offsets.take_bytes(remaining_disk.try_into().unwrap()),
527 )
528 .await?;
529 disk.set_needs_flush();
530 Ok(n)
531 }
532
533 async fn punch_hole(&self, file_offset: u64, length: u64) -> crate::Result<()> {
534 let range = file_offset..(file_offset + length);
535 let disks = self.disks_in_range(&range);
536 for disk in disks {
537 if let Some(intersection) = range_intersection(&range, &disk.range()) {
538 disk.file
539 .punch_hole(
540 intersection.start - disk.offset,
541 intersection.end - intersection.start,
542 )
543 .await?;
544 disk.set_needs_flush();
545 }
546 }
547 Ok(())
548 }
549
550 async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> crate::Result<()> {
551 let range = file_offset..(file_offset + length);
552 let disks = self.disks_in_range(&range);
553 for disk in disks {
554 if let Some(intersection) = range_intersection(&range, &disk.range()) {
555 disk.file
556 .write_zeroes_at(
557 intersection.start - disk.offset,
558 intersection.end - intersection.start,
559 )
560 .await?;
561 disk.set_needs_flush();
562 }
563 }
564 Ok(())
565 }
566 }
567
568 /// Information about a partition to create.
569 #[derive(Clone, Debug, Eq, PartialEq)]
570 pub struct PartitionInfo {
571 pub label: String,
572 pub path: PathBuf,
573 pub partition_type: ImagePartitionType,
574 pub writable: bool,
575 pub size: u64,
576 pub part_guid: Option<Uuid>,
577 }
578
579 impl PartitionInfo {
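/// Rounds the partition size up to the 4 KiB partition alignment; e.g. (illustrative) a
/// 4000-byte image occupies 4096 bytes of the virtual disk, with the tail backed by the zero
/// filler for read-only partitions.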
580 fn aligned_size(&self) -> u64 {
581 self.size.next_multiple_of(1 << PARTITION_SIZE_SHIFT)
582 }
583 }
584
585 /// The type of partition.
586 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
587 pub enum ImagePartitionType {
588 LinuxFilesystem,
589 EfiSystemPartition,
590 }
591
592 impl ImagePartitionType {
593 fn guid(self) -> Uuid {
594 match self {
595 Self::LinuxFilesystem => LINUX_FILESYSTEM_GUID,
596 Self::EfiSystemPartition => EFI_SYSTEM_PARTITION_GUID,
597 }
598 }
599 }
600
601 /// Write protective MBR and primary GPT table.
602 fn write_beginning(
603 file: &mut impl Write,
604 disk_guid: Uuid,
605 partitions: &[u8],
606 partition_entries_crc32: u32,
607 secondary_table_offset: u64,
608 disk_size: u64,
609 ) -> Result<()> {
610 // Write the protective MBR to the first sector.
611 write_protective_mbr(file, disk_size)?;
612
613 // Write the GPT header, and pad out to the end of the sector.
614 write_gpt_header(
615 file,
616 disk_guid,
617 partition_entries_crc32,
618 secondary_table_offset,
619 false,
620 )?;
621 file.write_all(&[0; HEADER_PADDING_LENGTH])
622 .map_err(Error::WriteHeader)?;
623
624 // Write partition entries, including unused ones.
625 file.write_all(partitions).map_err(Error::WriteHeader)?;
626
627 // Write zeroes to align the first partition appropriately.
628 file.write_all(&[0; PARTITION_ALIGNMENT_SIZE])
629 .map_err(Error::WriteHeader)?;
630
631 Ok(())
632 }
633
634 /// Write secondary GPT table.
635 fn write_end(
636 file: &mut impl Write,
637 disk_guid: Uuid,
638 partitions: &[u8],
639 partition_entries_crc32: u32,
640 secondary_table_offset: u64,
641 ) -> Result<()> {
642 // Write partition entries, including unused ones.
643 file.write_all(partitions).map_err(Error::WriteHeader)?;
644
645 // Write the GPT header, and pad out to the end of the sector.
646 write_gpt_header(
647 file,
648 disk_guid,
649 partition_entries_crc32,
650 secondary_table_offset,
651 true,
652 )?;
653 file.write_all(&[0; HEADER_PADDING_LENGTH])
654 .map_err(Error::WriteHeader)?;
655
656 Ok(())
657 }
658
659 /// Create the `GptPartitionEntry` for the given partition.
660 fn create_gpt_entry(partition: &PartitionInfo, offset: u64) -> GptPartitionEntry {
661 let mut partition_name: Vec<u16> = partition.label.encode_utf16().collect();
662 partition_name.resize(36, 0);
663
664 GptPartitionEntry {
665 partition_type_guid: partition.partition_type.guid(),
666 unique_partition_guid: partition.part_guid.unwrap_or(Uuid::new_v4()),
667 first_lba: offset / SECTOR_SIZE,
668 last_lba: (offset + partition.aligned_size()) / SECTOR_SIZE - 1,
669 attributes: 0,
670 partition_name: partition_name.try_into().unwrap(),
671 }
672 }
673
674 /// Create one or more `ComponentDisk` proto messages for the given partition.
675 fn create_component_disks(
676 partition: &PartitionInfo,
677 offset: u64,
678 zero_filler_path: &str,
679 ) -> Result<Vec<ComponentDisk>> {
680 let aligned_size = partition.aligned_size();
681
682 let mut component_disks = vec![ComponentDisk {
683 offset,
684 file_path: partition
685 .path
686 .to_str()
687 .ok_or_else(|| Error::InvalidPath(partition.path.to_owned()))?
688 .to_string(),
689 read_write_capability: if partition.writable {
690 ReadWriteCapability::READ_WRITE.into()
691 } else {
692 ReadWriteCapability::READ_ONLY.into()
693 },
694 ..ComponentDisk::new()
695 }];
696
697 if partition.size != aligned_size {
698 if partition.writable {
699 return Err(Error::UnalignedReadWrite(partition.to_owned()));
700 } else {
701 // Fill in the gap by reusing the zero filler file; it is always big enough because
702 // any gap is smaller than its size of 1 << PARTITION_SIZE_SHIFT (4k).
703 component_disks.push(ComponentDisk {
704 offset: offset + partition.size,
705 file_path: zero_filler_path.to_owned(),
706 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
707 ..ComponentDisk::new()
708 });
709 }
710 }
711
712 Ok(component_disks)
713 }
714
715 /// Create a new composite disk image containing the given partitions, and write it out to the given
716 /// files.
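///
/// A minimal usage sketch (illustrative only; the paths and files shown are hypothetical and
/// this block is not compiled as a doctest):
///
/// ```ignore
/// let partitions = [PartitionInfo {
///     label: "rootfs".to_string(),
///     path: "/path/to/rootfs.img".into(),
///     partition_type: ImagePartitionType::LinuxFilesystem,
///     writable: false,
///     size: 4096,
///     part_guid: None,
/// }];
/// create_composite_disk(
///     &partitions,
///     Path::new("zero_filler.img"),
///     Path::new("header.img"),
///     &mut header_file,
///     Path::new("footer.img"),
///     &mut footer_file,
///     &mut composite_file,
/// )?;
/// ```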
717 pub fn create_composite_disk(
718 partitions: &[PartitionInfo],
719 zero_filler_path: &Path,
720 header_path: &Path,
721 header_file: &mut File,
722 footer_path: &Path,
723 footer_file: &mut File,
724 output_composite: &mut File,
725 ) -> Result<()> {
726 let zero_filler_path = zero_filler_path
727 .to_str()
728 .ok_or_else(|| Error::InvalidPath(zero_filler_path.to_owned()))?
729 .to_string();
730 let header_path = header_path
731 .to_str()
732 .ok_or_else(|| Error::InvalidPath(header_path.to_owned()))?
733 .to_string();
734 let footer_path = footer_path
735 .to_str()
736 .ok_or_else(|| Error::InvalidPath(footer_path.to_owned()))?
737 .to_string();
738
739 let mut composite_proto = CompositeDisk::new();
740 composite_proto.version = COMPOSITE_DISK_VERSION;
741 composite_proto.component_disks.push(ComponentDisk {
742 file_path: header_path,
743 offset: 0,
744 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
745 ..ComponentDisk::new()
746 });
747
748 // Write partitions to a temporary buffer so that we can calculate the CRC, and construct the
749 // ComponentDisk proto messages at the same time.
750 let mut partitions_buffer =
751 [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
752 let mut writer: &mut [u8] = &mut partitions_buffer;
753 let mut next_disk_offset = GPT_BEGINNING_SIZE;
754 let mut labels = HashSet::with_capacity(partitions.len());
755 for partition in partitions {
756 let gpt_entry = create_gpt_entry(partition, next_disk_offset);
757 if !labels.insert(gpt_entry.partition_name) {
758 return Err(Error::DuplicatePartitionLabel(partition.label.clone()));
759 }
760 gpt_entry.write_bytes(&mut writer)?;
761
762 for component_disk in
763 create_component_disks(partition, next_disk_offset, &zero_filler_path)?
764 {
765 composite_proto.component_disks.push(component_disk);
766 }
767
768 next_disk_offset += partition.aligned_size();
769 }
770 // The secondary GPT needs to be at the very end of the file, but its size (0x4200) is not
771 // aligned to the chosen partition size (0x1000). We compensate for that by writing some
772 // padding to the start of the footer file.
773 const FOOTER_PADDING: u64 =
774 GPT_END_SIZE.next_multiple_of(1 << PARTITION_SIZE_SHIFT) - GPT_END_SIZE;
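// E.g. with GPT_END_SIZE = 0x4200 (as noted above), this is 0x5000 - 0x4200 = 0xe00 bytes.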
775 let footer_file_offset = next_disk_offset;
776 let secondary_table_offset = footer_file_offset + FOOTER_PADDING;
777 let disk_size = secondary_table_offset + GPT_END_SIZE;
778 composite_proto.component_disks.push(ComponentDisk {
779 file_path: footer_path,
780 offset: footer_file_offset,
781 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
782 ..ComponentDisk::new()
783 });
784
785 // Calculate CRC32 of partition entries.
786 let mut hasher = Hasher::new();
787 hasher.update(&partitions_buffer);
788 let partition_entries_crc32 = hasher.finalize();
789
790 let disk_guid = Uuid::new_v4();
791 write_beginning(
792 header_file,
793 disk_guid,
794 &partitions_buffer,
795 partition_entries_crc32,
796 secondary_table_offset,
797 disk_size,
798 )?;
799
800 footer_file
801 .write_all(&[0; FOOTER_PADDING as usize])
802 .map_err(Error::WriteHeader)?;
803 write_end(
804 footer_file,
805 disk_guid,
806 &partitions_buffer,
807 partition_entries_crc32,
808 secondary_table_offset,
809 )?;
810
811 composite_proto.length = disk_size;
812 output_composite
813 .write_all(CDISK_MAGIC.as_bytes())
814 .map_err(Error::WriteHeader)?;
815 composite_proto
816 .write_to_writer(output_composite)
817 .map_err(Error::WriteProto)?;
818
819 Ok(())
820 }
821
822 /// Create a zero filler file which can be used to fill the gaps between partition files.
823 /// The filler is sized to 1 << PARTITION_SIZE_SHIFT bytes (4 KiB), which is big enough to fill any gap.
824 pub fn create_zero_filler<P: AsRef<Path>>(zero_filler_path: P) -> Result<()> {
825 let f = OpenOptions::new()
826 .create(true)
827 .read(true)
828 .write(true)
829 .truncate(true)
830 .open(zero_filler_path.as_ref())
831 .map_err(Error::WriteZeroFiller)?;
832 f.set_len(1 << PARTITION_SIZE_SHIFT)
833 .map_err(Error::WriteZeroFiller)
834 }
835
836 #[cfg(test)]
837 mod tests {
838 use std::fs::OpenOptions;
839 use std::io::Write;
840 use std::matches;
841
842 use base::AsRawDescriptor;
843 use tempfile::tempfile;
844
845 use super::*;
846
847 fn new_from_components(disks: Vec<ComponentDiskPart>) -> Result<CompositeDiskFile> {
848 CompositeDiskFile::new(disks, tempfile().unwrap())
849 }
850
851 #[test]
852 fn block_duplicate_offset_disks() {
853 let file1 = tempfile().unwrap();
854 let file2 = tempfile().unwrap();
855 let disk_part1 = ComponentDiskPart {
856 file: Box::new(file1),
857 offset: 0,
858 length: 100,
859 needs_flush: AtomicBool::new(false),
860 };
861 let disk_part2 = ComponentDiskPart {
862 file: Box::new(file2),
863 offset: 0,
864 length: 100,
865 needs_flush: AtomicBool::new(false),
866 };
867 assert!(new_from_components(vec![disk_part1, disk_part2]).is_err());
868 }
869
870 #[test]
871 fn get_len() {
872 let file1 = tempfile().unwrap();
873 let file2 = tempfile().unwrap();
874 let disk_part1 = ComponentDiskPart {
875 file: Box::new(file1),
876 offset: 0,
877 length: 100,
878 needs_flush: AtomicBool::new(false),
879 };
880 let disk_part2 = ComponentDiskPart {
881 file: Box::new(file2),
882 offset: 100,
883 length: 100,
884 needs_flush: AtomicBool::new(false),
885 };
886 let composite = new_from_components(vec![disk_part1, disk_part2]).unwrap();
887 let len = composite.get_len().unwrap();
888 assert_eq!(len, 200);
889 }
890
891 #[test]
892 fn async_get_len() {
893 let file1 = tempfile().unwrap();
894 let file2 = tempfile().unwrap();
895 let disk_part1 = ComponentDiskPart {
896 file: Box::new(file1),
897 offset: 0,
898 length: 100,
899 needs_flush: AtomicBool::new(false),
900 };
901 let disk_part2 = ComponentDiskPart {
902 file: Box::new(file2),
903 offset: 100,
904 length: 100,
905 needs_flush: AtomicBool::new(false),
906 };
907 let composite = new_from_components(vec![disk_part1, disk_part2]).unwrap();
908
909 let ex = Executor::new().unwrap();
910 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
911 let len = composite.get_len().unwrap();
912 assert_eq!(len, 200);
913 }
914
915 #[test]
916 fn single_file_passthrough() {
917 let file = tempfile().unwrap();
918 let disk_part = ComponentDiskPart {
919 file: Box::new(file),
920 offset: 0,
921 length: 100,
922 needs_flush: AtomicBool::new(false),
923 };
924 let composite = new_from_components(vec![disk_part]).unwrap();
925 let mut input_memory = [55u8; 5];
926 let input_volatile_memory = VolatileSlice::new(&mut input_memory[..]);
927 composite
928 .write_all_at_volatile(input_volatile_memory, 0)
929 .unwrap();
930 let mut output_memory = [0u8; 5];
931 let output_volatile_memory = VolatileSlice::new(&mut output_memory[..]);
932 composite
933 .read_exact_at_volatile(output_volatile_memory, 0)
934 .unwrap();
935 assert_eq!(input_memory, output_memory);
936 }
937
938 #[test]
939 fn async_single_file_passthrough() {
940 let file = tempfile().unwrap();
941 let disk_part = ComponentDiskPart {
942 file: Box::new(file),
943 offset: 0,
944 length: 100,
945 needs_flush: AtomicBool::new(false),
946 };
947 let composite = new_from_components(vec![disk_part]).unwrap();
948 let ex = Executor::new().unwrap();
949 ex.run_until(async {
950 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
951 let expected = [55u8; 5];
952 assert_eq!(
953 composite.write_double_buffered(0, &expected).await.unwrap(),
954 5
955 );
956 let mut buf = [0u8; 5];
957 assert_eq!(
958 composite
959 .read_double_buffered(0, &mut buf[..])
960 .await
961 .unwrap(),
962 5
963 );
964 assert_eq!(buf, expected);
965 })
966 .unwrap();
967 }
968
969 #[test]
970 fn triple_file_descriptors() {
971 let file1 = tempfile().unwrap();
972 let file2 = tempfile().unwrap();
973 let file3 = tempfile().unwrap();
974 let mut in_descriptors = vec![
975 file1.as_raw_descriptor(),
976 file2.as_raw_descriptor(),
977 file3.as_raw_descriptor(),
978 ];
979 in_descriptors.sort_unstable();
980 let disk_part1 = ComponentDiskPart {
981 file: Box::new(file1),
982 offset: 0,
983 length: 100,
984 needs_flush: AtomicBool::new(false),
985 };
986 let disk_part2 = ComponentDiskPart {
987 file: Box::new(file2),
988 offset: 100,
989 length: 100,
990 needs_flush: AtomicBool::new(false),
991 };
992 let disk_part3 = ComponentDiskPart {
993 file: Box::new(file3),
994 offset: 200,
995 length: 100,
996 needs_flush: AtomicBool::new(false),
997 };
998 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
999 let mut out_descriptors = composite.as_raw_descriptors();
1000 out_descriptors.sort_unstable();
1001 assert_eq!(in_descriptors, out_descriptors);
1002 }
1003
1004 #[test]
1005 fn triple_file_passthrough() {
1006 let file1 = tempfile().unwrap();
1007 let file2 = tempfile().unwrap();
1008 let file3 = tempfile().unwrap();
1009 let disk_part1 = ComponentDiskPart {
1010 file: Box::new(file1),
1011 offset: 0,
1012 length: 100,
1013 needs_flush: AtomicBool::new(false),
1014 };
1015 let disk_part2 = ComponentDiskPart {
1016 file: Box::new(file2),
1017 offset: 100,
1018 length: 100,
1019 needs_flush: AtomicBool::new(false),
1020 };
1021 let disk_part3 = ComponentDiskPart {
1022 file: Box::new(file3),
1023 offset: 200,
1024 length: 100,
1025 needs_flush: AtomicBool::new(false),
1026 };
1027 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1028 let mut input_memory = [55u8; 200];
1029 let input_volatile_memory = VolatileSlice::new(&mut input_memory[..]);
1030 composite
1031 .write_all_at_volatile(input_volatile_memory, 50)
1032 .unwrap();
1033 let mut output_memory = [0u8; 200];
1034 let output_volatile_memory = VolatileSlice::new(&mut output_memory[..]);
1035 composite
1036 .read_exact_at_volatile(output_volatile_memory, 50)
1037 .unwrap();
1038 assert!(input_memory.iter().eq(output_memory.iter()));
1039 }
1040
1041 #[test]
1042 fn async_triple_file_passthrough() {
1043 let file1 = tempfile().unwrap();
1044 let file2 = tempfile().unwrap();
1045 let file3 = tempfile().unwrap();
1046 let disk_part1 = ComponentDiskPart {
1047 file: Box::new(file1),
1048 offset: 0,
1049 length: 100,
1050 needs_flush: AtomicBool::new(false),
1051 };
1052 let disk_part2 = ComponentDiskPart {
1053 file: Box::new(file2),
1054 offset: 100,
1055 length: 100,
1056 needs_flush: AtomicBool::new(false),
1057 };
1058 let disk_part3 = ComponentDiskPart {
1059 file: Box::new(file3),
1060 offset: 200,
1061 length: 100,
1062 needs_flush: AtomicBool::new(false),
1063 };
1064 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1065 let ex = Executor::new().unwrap();
1066 ex.run_until(async {
1067 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1068
1069 let expected = [55u8; 200];
1070 assert_eq!(
1071 composite.write_double_buffered(0, &expected).await.unwrap(),
1072 100
1073 );
1074 assert_eq!(
1075 composite
1076 .write_double_buffered(100, &expected[100..])
1077 .await
1078 .unwrap(),
1079 100
1080 );
1081
1082 let mut buf = [0u8; 200];
1083 assert_eq!(
1084 composite
1085 .read_double_buffered(0, &mut buf[..])
1086 .await
1087 .unwrap(),
1088 100
1089 );
1090 assert_eq!(
1091 composite
1092 .read_double_buffered(100, &mut buf[100..])
1093 .await
1094 .unwrap(),
1095 100
1096 );
1097 assert_eq!(buf, expected);
1098 })
1099 .unwrap();
1100 }
1101
1102 #[test]
1103 fn async_triple_file_punch_hole() {
1104 let file1 = tempfile().unwrap();
1105 let file2 = tempfile().unwrap();
1106 let file3 = tempfile().unwrap();
1107 let disk_part1 = ComponentDiskPart {
1108 file: Box::new(file1),
1109 offset: 0,
1110 length: 100,
1111 needs_flush: AtomicBool::new(false),
1112 };
1113 let disk_part2 = ComponentDiskPart {
1114 file: Box::new(file2),
1115 offset: 100,
1116 length: 100,
1117 needs_flush: AtomicBool::new(false),
1118 };
1119 let disk_part3 = ComponentDiskPart {
1120 file: Box::new(file3),
1121 offset: 200,
1122 length: 100,
1123 needs_flush: AtomicBool::new(false),
1124 };
1125 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1126 let ex = Executor::new().unwrap();
1127 ex.run_until(async {
1128 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1129
1130 let input = [55u8; 300];
1131 assert_eq!(
1132 composite.write_double_buffered(0, &input).await.unwrap(),
1133 100
1134 );
1135 assert_eq!(
1136 composite
1137 .write_double_buffered(100, &input[100..])
1138 .await
1139 .unwrap(),
1140 100
1141 );
1142 assert_eq!(
1143 composite
1144 .write_double_buffered(200, &input[200..])
1145 .await
1146 .unwrap(),
1147 100
1148 );
1149
1150 composite.punch_hole(50, 200).await.unwrap();
1151
1152 let mut buf = [0u8; 300];
1153 assert_eq!(
1154 composite
1155 .read_double_buffered(0, &mut buf[..])
1156 .await
1157 .unwrap(),
1158 100
1159 );
1160 assert_eq!(
1161 composite
1162 .read_double_buffered(100, &mut buf[100..])
1163 .await
1164 .unwrap(),
1165 100
1166 );
1167 assert_eq!(
1168 composite
1169 .read_double_buffered(200, &mut buf[200..])
1170 .await
1171 .unwrap(),
1172 100
1173 );
1174
1175 let mut expected = input;
1176 expected[50..250].iter_mut().for_each(|x| *x = 0);
1177 assert_eq!(buf, expected);
1178 })
1179 .unwrap();
1180 }
1181
1182 #[test]
1183 fn async_triple_file_write_zeroes() {
1184 let file1 = tempfile().unwrap();
1185 let file2 = tempfile().unwrap();
1186 let file3 = tempfile().unwrap();
1187 let disk_part1 = ComponentDiskPart {
1188 file: Box::new(file1),
1189 offset: 0,
1190 length: 100,
1191 needs_flush: AtomicBool::new(false),
1192 };
1193 let disk_part2 = ComponentDiskPart {
1194 file: Box::new(file2),
1195 offset: 100,
1196 length: 100,
1197 needs_flush: AtomicBool::new(false),
1198 };
1199 let disk_part3 = ComponentDiskPart {
1200 file: Box::new(file3),
1201 offset: 200,
1202 length: 100,
1203 needs_flush: AtomicBool::new(false),
1204 };
1205 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1206 let ex = Executor::new().unwrap();
1207 ex.run_until(async {
1208 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1209
1210 let input = [55u8; 300];
1211 assert_eq!(
1212 composite.write_double_buffered(0, &input).await.unwrap(),
1213 100
1214 );
1215 assert_eq!(
1216 composite
1217 .write_double_buffered(100, &input[100..])
1218 .await
1219 .unwrap(),
1220 100
1221 );
1222 assert_eq!(
1223 composite
1224 .write_double_buffered(200, &input[200..])
1225 .await
1226 .unwrap(),
1227 100
1228 );
1229
1230 composite.write_zeroes_at(50, 200).await.unwrap();
1231
1232 let mut buf = [0u8; 300];
1233 assert_eq!(
1234 composite
1235 .read_double_buffered(0, &mut buf[..])
1236 .await
1237 .unwrap(),
1238 100
1239 );
1240 assert_eq!(
1241 composite
1242 .read_double_buffered(100, &mut buf[100..])
1243 .await
1244 .unwrap(),
1245 100
1246 );
1247 assert_eq!(
1248 composite
1249 .read_double_buffered(200, &mut buf[200..])
1250 .await
1251 .unwrap(),
1252 100
1253 );
1254
1255 let mut expected = input;
1256 expected[50..250].iter_mut().for_each(|x| *x = 0);
1257 assert_eq!(buf, expected);
1258 })
1259 .unwrap();
1260 }
1261
1262 // TODO: fsync on a RO file is legal, so this test doesn't work as expected. Consider using a mock
1263 // DiskFile to detect the fsync calls.
1264 #[test]
1265 fn async_fsync_skips_unchanged_parts() {
1266 let mut rw_file = tempfile().unwrap();
1267 rw_file.write_all(&[0u8; 100]).unwrap();
1268 rw_file.seek(SeekFrom::Start(0)).unwrap();
1269 let mut ro_disk_image = tempfile::NamedTempFile::new().unwrap();
1270 ro_disk_image.write_all(&[0u8; 100]).unwrap();
1271 let ro_file = OpenOptions::new()
1272 .read(true)
1273 .open(ro_disk_image.path())
1274 .unwrap();
1275
1276 let rw_part = ComponentDiskPart {
1277 file: Box::new(rw_file),
1278 offset: 0,
1279 length: 100,
1280 needs_flush: AtomicBool::new(false),
1281 };
1282 let ro_part = ComponentDiskPart {
1283 file: Box::new(ro_file),
1284 offset: 100,
1285 length: 100,
1286 needs_flush: AtomicBool::new(false),
1287 };
1288 let composite = new_from_components(vec![rw_part, ro_part]).unwrap();
1289 let ex = Executor::new().unwrap();
1290 ex.run_until(async {
1291 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1292
1293 // Write to the RW part so that some fsync operation will occur.
1294 composite.write_zeroes_at(0, 20).await.unwrap();
1295
1296 // This is the test's assert. fsyncing should NOT touch a read-only disk part. On
1297 // Windows, this would be an error.
1298 composite.fsync().await.expect(
1299 "Failed to fsync composite disk. \
1300 This can happen if the disk writable state is wrong.",
1301 );
1302 })
1303 .unwrap();
1304 }
1305
1306 #[test]
1307 fn beginning_size() {
1308 let mut buffer = vec![];
1309 let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
1310 let disk_size = 1000 * SECTOR_SIZE;
1311 write_beginning(
1312 &mut buffer,
1313 Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
1314 &partitions,
1315 42,
1316 disk_size - GPT_END_SIZE,
1317 disk_size,
1318 )
1319 .unwrap();
1320
1321 assert_eq!(buffer.len(), GPT_BEGINNING_SIZE as usize);
1322 }
1323
1324 #[test]
1325 fn end_size() {
1326 let mut buffer = vec![];
1327 let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
1328 let disk_size = 1000 * SECTOR_SIZE;
1329 write_end(
1330 &mut buffer,
1331 Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
1332 &partitions,
1333 42,
1334 disk_size - GPT_END_SIZE,
1335 )
1336 .unwrap();
1337
1338 assert_eq!(buffer.len(), GPT_END_SIZE as usize);
1339 }
1340
1341 /// Creates a composite disk image with no partitions.
1342 #[test]
1343 fn create_composite_disk_empty() {
1344 let mut header_image = tempfile().unwrap();
1345 let mut footer_image = tempfile().unwrap();
1346 let mut composite_image = tempfile().unwrap();
1347
1348 create_composite_disk(
1349 &[],
1350 Path::new("/zero_filler.img"),
1351 Path::new("/header_path.img"),
1352 &mut header_image,
1353 Path::new("/footer_path.img"),
1354 &mut footer_image,
1355 &mut composite_image,
1356 )
1357 .unwrap();
1358 }
1359
1360 /// Creates a composite disk image with two partitions.
1361 #[test]
1362 #[allow(clippy::unnecessary_to_owned)] // false positives
1363 fn create_composite_disk_success() {
1364 fn tmpfile(prefix: &str) -> tempfile::NamedTempFile {
1365 tempfile::Builder::new().prefix(prefix).tempfile().unwrap()
1366 }
1367
1368 let mut header_image = tmpfile("header");
1369 let mut footer_image = tmpfile("footer");
1370 let mut composite_image = tmpfile("composite");
1371
1372 // The test doesn't read these; it just needs to be able to open them.
1373 let partition1 = tmpfile("partition1");
1374 let partition2 = tmpfile("partition2");
1375 let zero_filler = tmpfile("zero");
1376
1377 create_composite_disk(
1378 &[
1379 PartitionInfo {
1380 label: "partition1".to_string(),
1381 path: partition1.path().to_path_buf(),
1382 partition_type: ImagePartitionType::LinuxFilesystem,
1383 writable: false,
1384 // Needs small amount of padding.
1385 size: 4000,
1386 part_guid: None,
1387 },
1388 PartitionInfo {
1389 label: "partition2".to_string(),
1390 path: partition2.path().to_path_buf(),
1391 partition_type: ImagePartitionType::LinuxFilesystem,
1392 writable: true,
1393 // Needs no padding.
1394 size: 4096,
1395 part_guid: Some(Uuid::from_u128(0x4049C8DC_6C2B_C740_A95A_BDAA629D4378)),
1396 },
1397 ],
1398 zero_filler.path(),
1399 &header_image.path().to_path_buf(),
1400 header_image.as_file_mut(),
1401 &footer_image.path().to_path_buf(),
1402 footer_image.as_file_mut(),
1403 composite_image.as_file_mut(),
1404 )
1405 .unwrap();
1406
1407 // Check magic.
1408 composite_image.rewind().unwrap();
1409 let mut magic_space = [0u8; CDISK_MAGIC.len()];
1410 composite_image.read_exact(&mut magic_space[..]).unwrap();
1411 assert_eq!(magic_space, CDISK_MAGIC.as_bytes());
1412 // Check proto.
1413 let proto = CompositeDisk::parse_from_reader(&mut composite_image).unwrap();
1414 assert_eq!(
1415 proto,
1416 CompositeDisk {
1417 version: 2,
1418 component_disks: vec![
1419 ComponentDisk {
1420 file_path: header_image.path().to_str().unwrap().to_string(),
1421 offset: 0,
1422 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1423 ..ComponentDisk::new()
1424 },
1425 ComponentDisk {
1426 file_path: partition1.path().to_str().unwrap().to_string(),
1427 offset: 0x5000, // GPT_BEGINNING_SIZE,
1428 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1429 ..ComponentDisk::new()
1430 },
1431 ComponentDisk {
1432 file_path: zero_filler.path().to_str().unwrap().to_string(),
1433 offset: 0x5fa0, // GPT_BEGINNING_SIZE + 4000,
1434 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1435 ..ComponentDisk::new()
1436 },
1437 ComponentDisk {
1438 file_path: partition2.path().to_str().unwrap().to_string(),
1439 offset: 0x6000, // GPT_BEGINNING_SIZE + 4096,
1440 read_write_capability: ReadWriteCapability::READ_WRITE.into(),
1441 ..ComponentDisk::new()
1442 },
1443 ComponentDisk {
1444 file_path: footer_image.path().to_str().unwrap().to_string(),
1445 offset: 0x7000, // GPT_BEGINNING_SIZE + 4096 + 4096,
1446 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1447 ..ComponentDisk::new()
1448 },
1449 ],
1450 length: 0xc000,
1451 ..CompositeDisk::new()
1452 }
1453 );
1454
1455 // Open the file as a composite disk and do some basic GPT header/footer validation.
1456 let ex = Executor::new().unwrap();
1457 ex.run_until(async {
1458 let disk = Box::new(
1459 CompositeDiskFile::from_file(
1460 composite_image.into_file(),
1461 DiskFileParams {
1462 path: "/foo".into(),
1463 is_read_only: true,
1464 is_sparse_file: false,
1465 is_overlapped: false,
1466 is_direct: false,
1467 lock: false,
1468 depth: 0,
1469 },
1470 )
1471 .unwrap(),
1472 )
1473 .to_async_disk(&ex)
1474 .unwrap();
1475
1476 let header_offset = SECTOR_SIZE;
1477 let footer_offset = disk.get_len().unwrap() - SECTOR_SIZE;
1478
1479 let mut header_bytes = [0u8; SECTOR_SIZE as usize];
1480 assert_eq!(
1481 disk.read_double_buffered(header_offset, &mut header_bytes[..])
1482 .await
1483 .unwrap(),
1484 SECTOR_SIZE as usize
1485 );
1486
1487 let mut footer_bytes = [0u8; SECTOR_SIZE as usize];
1488 assert_eq!(
1489 disk.read_double_buffered(footer_offset, &mut footer_bytes[..])
1490 .await
1491 .unwrap(),
1492 SECTOR_SIZE as usize
1493 );
1494
1495 // Check the header and footer fields point to each other correctly.
1496 let header_current_lba = u64::from_le_bytes(header_bytes[24..32].try_into().unwrap());
1497 assert_eq!(header_current_lba * SECTOR_SIZE, header_offset);
1498 let header_backup_lba = u64::from_le_bytes(header_bytes[32..40].try_into().unwrap());
1499 assert_eq!(header_backup_lba * SECTOR_SIZE, footer_offset);
1500
1501 let footer_current_lba = u64::from_le_bytes(footer_bytes[24..32].try_into().unwrap());
1502 assert_eq!(footer_current_lba * SECTOR_SIZE, footer_offset);
1503 let footer_backup_lba = u64::from_le_bytes(footer_bytes[32..40].try_into().unwrap());
1504 assert_eq!(footer_backup_lba * SECTOR_SIZE, header_offset);
1505
1506 // Header and footer should be equal if we zero the pointers and CRCs.
1507 header_bytes[16..20].fill(0);
1508 header_bytes[24..40].fill(0);
1509 footer_bytes[16..20].fill(0);
1510 footer_bytes[24..40].fill(0);
1511 assert_eq!(header_bytes, footer_bytes);
1512 })
1513 .unwrap();
1514 }
1515
1516 /// Attempts to create a composite disk image with two partitions with the same label.
1517 #[test]
1518 fn create_composite_disk_duplicate_label() {
1519 let mut header_image = tempfile().unwrap();
1520 let mut footer_image = tempfile().unwrap();
1521 let mut composite_image = tempfile().unwrap();
1522
1523 let result = create_composite_disk(
1524 &[
1525 PartitionInfo {
1526 label: "label".to_string(),
1527 path: "/partition1.img".to_string().into(),
1528 partition_type: ImagePartitionType::LinuxFilesystem,
1529 writable: false,
1530 size: 0,
1531 part_guid: None,
1532 },
1533 PartitionInfo {
1534 label: "label".to_string(),
1535 path: "/partition2.img".to_string().into(),
1536 partition_type: ImagePartitionType::LinuxFilesystem,
1537 writable: true,
1538 size: 0,
1539 part_guid: None,
1540 },
1541 ],
1542 Path::new("/zero_filler.img"),
1543 Path::new("/header_path.img"),
1544 &mut header_image,
1545 Path::new("/footer_path.img"),
1546 &mut footer_image,
1547 &mut composite_image,
1548 );
1549 assert!(matches!(result, Err(Error::DuplicatePartitionLabel(label)) if label == "label"));
1550 }
1551 }
1552