1 // Copyright 2023, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 use crate::{BlockIo, Disk, Result};
16 use core::{
17     array::from_fn,
18     cmp::min,
19     convert::TryFrom,
20     default::Default,
21     fmt::{Debug, Formatter},
22     mem::size_of,
23     num::NonZeroU64,
24     ops::{Deref, DerefMut},
25     str::from_utf8,
26 };
27 use crc32fast::Hasher;
28 use gbl_async::block_on;
29 use liberror::{Error, GptError};
30 use safemath::SafeNum;
31 use zerocopy::{
32     ByteSlice, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout, Ref, SplitByteSlice,
33 };
34 
35 /// Number of bytes in GUID.
36 pub const GPT_GUID_LEN: usize = 16;
37 /// The maximum number of UTF-16 characters in a GPT partition name, including termination.
38 pub const GPT_NAME_LEN_U16: usize = 36;
39 const GPT_NAME_LEN_U8: usize = 2 * GPT_NAME_LEN_U16;
40 
41 /// The top-level GPT header.
42 #[repr(C, packed)]
43 #[derive(
44     Debug, Default, Copy, Clone, Immutable, IntoBytes, FromBytes, KnownLayout, PartialEq, Eq,
45 )]
46 pub struct GptHeader {
47     /// Magic bytes; must be [GPT_MAGIC].
48     pub magic: u64,
49     /// Header version.
50     pub revision: u32,
51     /// Header size in bytes.
52     pub size: u32,
53     /// CRC of the first `size` bytes, calculated with this field zeroed.
54     pub crc32: u32,
55     /// Reserved; must be set to 0.
56     pub reserved0: u32,
57     /// The on-disk block location of this header.
58     pub current: u64,
59     /// The on-disk block location of the other header.
60     pub backup: u64,
61     /// First usable block for partition contents.
62     pub first: u64,
63     /// Last usable block for partition contents (inclusive).
64     pub last: u64,
65     /// Disk GUID.
66     pub guid: [u8; GPT_GUID_LEN],
67     /// Starting block for the partition entries array.
68     pub entries: u64,
69     /// Number of partition entries.
70     pub entries_count: u32,
71     /// The size of each partition entry in bytes.
72     pub entries_size: u32,
73     /// CRC of the partition entries array.
74     pub entries_crc: u32,
75 }
76 
77 impl GptHeader {
78     /// Casts a bytes slice into a mutable GptHeader structure.
79     pub fn from_bytes_mut(bytes: &mut [u8]) -> &mut GptHeader {
80         Ref::into_mut(Ref::<_, GptHeader>::new_from_prefix(bytes).unwrap().0)
81     }
82 
83     /// Computes the actual crc32 value.
84     fn calculate_header_crc(&self) -> u32 {
85         let mut hasher = Hasher::new();
86         hasher.update(&self.as_bytes()[..GPT_CRC32_OFFSET]);
87         hasher.update(&[0u8; size_of::<u32>()]);
88         hasher.update(&self.as_bytes()[GPT_CRC32_OFFSET + size_of::<u32>()..]);
89         hasher.finalize()
90     }
91 
92     /// Update the header crc32 value.
93     pub fn update_crc(&mut self) {
94         self.crc32 = self.calculate_header_crc();
95     }
96 
97     /// Updates entries and header crc according to the given entries buffer.
98     fn update_entries_crc(&mut self, entries: &[u8]) {
99         let size = SafeNum::from(self.entries_count) * self.entries_size;
100         self.entries_crc = crc32(&entries[..size.try_into().unwrap()]);
101         self.update_crc();
102     }
103 }
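// Editor's note: an illustrative sketch, not part of the original source. It shows how the CRC
// helpers above fit together: after mutating any header field, `update_crc()` must be called so
// that the stored `crc32` matches `calculate_header_crc()` again (which is what `check_header`
// verifies). The variable names below are hypothetical.
//
//     let mut hdr: GptHeader = existing_valid_header;
//     hdr.last = new_last_usable_block;
//     hdr.update_crc();
//     assert_eq!({ hdr.crc32 }, hdr.calculate_header_crc());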
104 
105 /// Computes the number of blocks occupied by the reserved space for 128 partition entries in GPT.
106 fn gpt_entries_blk(block_size: u64) -> Result<u64> {
107     let size = u64::try_from(GPT_MAX_NUM_ENTRIES_SIZE).unwrap();
108     match size % block_size {
109         0 => Ok(size / block_size),
110         _ => Err(Error::InvalidInput),
111     }
112 }
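// Editor's note: a worked example of the computation above (illustrative, not from the original
// source). The reserved entries area is GPT_MAX_NUM_ENTRIES_SIZE = 128 * 128 = 16384 bytes, so:
//
//     assert_eq!(gpt_entries_blk(512).unwrap(), 32);   // 16384 / 512
//     assert_eq!(gpt_entries_blk(4096).unwrap(), 4);   // 16384 / 4096
//     assert!(gpt_entries_blk(6144).is_err());         // 16384 is not a multiple of 6144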
113 
114 /// Checks a header against a block device.
115 ///
116 /// # Args
117 ///
118 /// * `io`: An implementation of [BlockIo].
119 /// * `header`: The GPT header to verify.
120 /// * `is_primary`: If the header is a primary header.
121 fn check_header(io: &mut impl BlockIo, header: &GptHeader, is_primary: bool) -> Result<()> {
122     let num_blks = SafeNum::from(io.info().num_blocks);
123     let blk_sz = io.info().block_size;
124 
125     // GPT spec requires that at least 128 entries worth of space be reserved.
126     let min_reserved_entries_blk = gpt_entries_blk(blk_sz)?;
127     // Minimum space needed: 2 * (header + entries) + MBR.
128     let min_disk_blks: u64 = ((min_reserved_entries_blk + 1) * 2 + 1).try_into().unwrap();
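    // Editor's note (illustrative, not from the original source): with 512-byte blocks this is
    // (32 + 1) * 2 + 1 = 67 blocks, i.e. MBR + two headers + two 32-block entries arrays.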
129     if min_disk_blks > u64::try_from(num_blks).unwrap() {
130         return Err(Error::GptError(GptError::DiskTooSmall));
131     }
132 
133     if header.magic != GPT_MAGIC {
134         return Err(Error::GptError(GptError::IncorrectMagic(header.magic)));
135     }
136 
137     if header.calculate_header_crc() != header.crc32 {
138         return Err(Error::GptError(GptError::IncorrectHeaderCrc));
139     }
140 
141     if header.size != size_of::<GptHeader>().try_into().unwrap() {
142         return Err(Error::GptError(GptError::UnexpectedHeaderSize {
143             actual: header.size,
144             expect: size_of::<GptHeader>(),
145         }));
146     }
147 
148     if header.entries_size != size_of::<GptEntry>().try_into().unwrap() {
149         return Err(Error::GptError(GptError::UnexpectedEntrySize {
150             actual: header.entries_size,
151             expect: size_of::<GptEntry>(),
152         }));
153     }
154 
155     // Checks first/last usable block.
156     //
157     // Assuming maximum range where partition entries are adjacent to GPT headers.
158     //
159     // Should leave a minimum space for MBR + primary header + primary entries before.
160     let min_first: u64 = (min_reserved_entries_blk + 2).try_into().unwrap();
161     // Should leave a minimum space for secondary header + secondary entries space after.
162     let max_last: u64 = (num_blks - 1 - min_reserved_entries_blk - 1).try_into().unwrap();
163     if header.first > header.last + 1 || header.first < min_first || header.last > max_last {
164         return Err(Error::GptError(GptError::InvalidFirstLastUsableBlock {
165             first: header.first,
166             last: header.last,
167             range: (min_first, max_last),
168         }));
169     }
170 
171     // Checks entries starting block.
172     if is_primary {
173         // For primary header, entries must be before first usable block and can hold up to
174         // `GPT_MAX_NUM_ENTRIES` entries
175         let right: u64 =
176             (SafeNum::from(header.first) - min_reserved_entries_blk).try_into().unwrap();
177         if !(header.entries >= 2 && header.entries <= right) {
178             return Err(Error::GptError(GptError::InvalidPrimaryEntriesStart {
179                 value: header.entries,
180                 expect_range: (2, right),
181             }));
182         }
183     } else {
184         // For secondary header, entries must be after last usable block and can hold up to
185         // `GPT_MAX_NUM_ENTRIES` entries.
186         if !(header.entries > header.last && header.entries <= max_last + 1) {
187             return Err(Error::GptError(GptError::InvalidSecondaryEntriesStart {
188                 value: header.entries,
189                 expect_range: (header.last + 1, max_last + 1),
190             }));
191         }
192     }
193 
194     if header.entries_count > GPT_MAX_NUM_ENTRIES.try_into().unwrap() {
195         return Err(Error::GptError(GptError::NumberOfEntriesOverflow {
196             entries: header.entries_count,
197             max_allowed: GPT_MAX_NUM_ENTRIES,
198         }));
199     }
200 
201     Ok(())
202 }
203 
204 /// Verifies the given entries against a verified GPT header.
205 ///
206 /// # Args
207 ///
208 /// * `header`: The verified GPT header corresponding to the entries.
209 /// * `entries`: The buffer containing the entries.
210 fn check_entries(header: &GptHeader, entries: &[u8]) -> Result<()> {
211     // Checks entries CRC.
212     assert!(header.entries_count <= GPT_MAX_NUM_ENTRIES.try_into().unwrap());
213     let entries_size: usize =
214         (SafeNum::from(header.entries_count) * GPT_ENTRY_SIZE).try_into().unwrap();
215     let entries = entries.get(..entries_size).ok_or(Error::GptError(GptError::EntriesTruncated))?;
216     if header.entries_crc != crc32(entries) {
217         return Err(Error::GptError(GptError::IncorrectEntriesCrc));
218     }
219 
220     // Checks each entry.
221     let entries = Ref::<_, [GptEntry]>::new_slice(entries)
222         .ok_or(Error::GptError(GptError::EntriesTruncated))?
223         .into_slice();
224     let entries = &entries[..header.entries_count.try_into().unwrap()];
225     for (idx, ele) in entries.iter().take_while(|v| !v.is_null()).enumerate() {
226         // Error information uses 1-based partition index.
227         let idx = idx.checked_add(1).unwrap();
228         let (first, last) = (ele.first, ele.last);
229         if first > last + 1 || last > header.last || first < header.first {
230             return Err(Error::GptError(GptError::InvalidPartitionRange {
231                 idx,
232                 part_range: (first, last),
233                 usable_range: (header.first, header.last),
234             }));
235         } else if ele.part_type == [0u8; GPT_GUID_LEN] {
236             return Err(Error::GptError(GptError::ZeroPartitionTypeGUID { idx }));
237         } else if ele.guid == [0u8; GPT_GUID_LEN] {
238             return Err(Error::GptError(GptError::ZeroPartitionUniqueGUID { idx }));
239         }
240     }
241 
242     // Checks overlap between partition ranges.
243     // Sorts an index array because we don't want to modify input.
244     let mut sorted_indices: [u8; GPT_MAX_NUM_ENTRIES] = from_fn(|i| i.try_into().unwrap());
245     sorted_indices.sort_unstable_by_key(|v| match entries.get(usize::try_from(*v).unwrap()) {
246         Some(v) if !v.is_null() => v.first,
247         _ => u64::MAX,
248     });
249 
250     let actual = entries.iter().position(|v| v.is_null()).unwrap_or(entries.len());
251     if actual > 1 {
252         for i in 0..actual - 1 {
253             let prev: usize = sorted_indices[i].try_into().unwrap();
254             let next: usize = sorted_indices[i + 1].try_into().unwrap();
255             if entries[prev].last >= entries[next].first {
256                 return Err(Error::GptError(GptError::PartitionRangeOverlap {
257                     prev: (prev + 1, entries[prev].first, entries[prev].last),
258                     next: (next + 1, entries[next].first, entries[next].last),
259                 }));
260             }
261         }
262     }
263 
264     Ok(())
265 }
266 
267 /// GptEntry is the partition entry data structure in the GPT.
268 #[repr(C, packed)]
269 #[derive(Debug, Copy, Clone, Immutable, IntoBytes, FromBytes, KnownLayout, PartialEq)]
270 pub struct GptEntry {
271     /// Partition type GUID.
272     pub part_type: [u8; GPT_GUID_LEN],
273     /// Unique partition GUID.
274     pub guid: [u8; GPT_GUID_LEN],
275     /// First block.
276     pub first: u64,
277     /// Last block (inclusive).
278     pub last: u64,
279     /// Partition flags.
280     pub flags: u64,
281     /// Partition name in UTF-16.
282     pub name: [u16; GPT_NAME_LEN_U16],
283 }
284 
285 impl GptEntry {
286     /// Return the partition entry size in blocks.
287     pub fn blocks(&self) -> Result<u64> {
288         // Must perform "+1" first before subtracting `self.first`. Otherwise if partition size is
289         // zero, where `self.first > self.last`, arithmetic will overflow.
290         u64::try_from(SafeNum::from(self.last) + 1 - self.first).map_err(Into::into)
291     }
292 
293     /// Return whether this is a `NULL` entry. The first null entry marks the end of the partition
294     /// entries.
295     fn is_null(&self) -> bool {
296         self.first == 0 && self.last == 0
297     }
298 
299     /// Decode the partition name into a string. A length N utf16 string can be at most 2N utf8
300     /// bytes. Therefore, a safe size of `buffer` is 2*GPT_NAME_LEN_U16 = 72.
301     pub fn name_to_str<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a str> {
302         let mut index = 0;
303         for c in char::decode_utf16(self.name) {
304             match c.unwrap_or(char::REPLACEMENT_CHARACTER) {
305                 '\0' => break,
306                 c if c.len_utf8() <= buffer[index..].len() => {
307                     index += c.encode_utf8(&mut buffer[index..]).len()
308                 }
309                 _ => return Err(Error::InvalidInput), // Not enough space in `buffer`.
310             }
311         }
312         // SAFETY:
313         // _unchecked should be OK here since we wrote each utf8 byte ourselves,
314         // but it's just an optimization, checked version would be fine also.
315         unsafe { Ok(core::str::from_utf8_unchecked(&buffer[..index])) }
316     }
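    // Editor's note: an illustrative sketch (not part of the original source) of calling
    // `name_to_str` with a worst-case sized buffer, assuming `entry` is a loaded `GptEntry`:
    //
    //     let mut buf = [0u8; GPT_NAME_LEN_U16 * 2];
    //     let name = entry.name_to_str(&mut buf[..])?;
    //     assert!(name.len() <= GPT_NAME_LEN_U16 * 2);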
317 
318     /// Checks if the partition name is the same as the given.
319     pub fn match_name(&self, part: &str) -> Result<bool> {
320         Ok(self.name_to_str(&mut [0u8; GPT_NAME_LEN_U16 * 2][..])? == part)
321     }
322 }
323 
324 impl core::fmt::Display for GptEntry {
325     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
326         // Format: partition name: "abc", [first, last]: [123, 456]
327         let mut name_conversion_buffer = [0u8; GPT_NAME_LEN_U16 * 2];
328         let name = self.name_to_str(&mut name_conversion_buffer).map_err(|_| core::fmt::Error)?;
329         // Note: The bracket around `{ self.first }` is for forcing a copy of the field because
330         // GptEntry is a packed structure.
331         write!(f, "partition: \"{}\", first: {}, last: {}", name, { self.first }, { self.last })
332     }
333 }
334 
335 // core::mem::offset_of!(GptHeader, crc32) is an unstable feature and rejected by the compiler in
336 // our settings. We pre-compute the value here.
337 const GPT_CRC32_OFFSET: usize = 16;
338 const GPT_ENTRY_SIZE: usize = size_of::<GptEntry>();
339 const GPT_MAX_NUM_ENTRIES: usize = 128;
340 const GPT_MAX_NUM_ENTRIES_SIZE: usize = GPT_MAX_NUM_ENTRIES * GPT_ENTRY_SIZE;
341 /// GPT header magic bytes ("EFI PART" in ASCII).
342 pub const GPT_MAGIC: u64 = 0x5452415020494645;
343 
344 enum HeaderType {
345     Primary,
346     Secondary,
347 }
348 
349 /// `Partition` contains information about a GPT partition.
350 #[derive(Debug, Copy, Clone, PartialEq)]
351 pub struct Partition {
352     entry: GptEntry,
353     block_size: u64,
354     decoded_name: Option<([u8; GPT_NAME_LEN_U8], usize)>,
355 }
356 
357 impl Partition {
358     /// Creates a new instance.
359     fn new(entry: GptEntry, block_size: u64) -> Self {
360         let mut buf = [0u8; GPT_NAME_LEN_U8];
361         let decoded_name = match entry.name_to_str(&mut buf[..]).ok().map(|v| v.len()) {
362             Some(len) => Some((buf, len)),
363             _ => None,
364         };
365         Self { entry, block_size, decoded_name }
366     }
367 
368     /// Gets the decoded partition name.
369     pub fn name(&self) -> Option<&str> {
370         // Correct by construction. `from_utf8` should not fail.
371         self.decoded_name.as_ref().map(|(buf, sz)| from_utf8(&buf[..*sz]).unwrap())
372     }
373 
374     /// Returns the partition size in bytes.
375     pub fn size(&self) -> Result<u64> {
376         u64::try_from(SafeNum::from(self.entry.blocks()?) * self.block_size).map_err(Error::from)
377     }
378 
379     /// Returns the block size of this partition.
380     pub fn block_size(&self) -> u64 {
381         self.block_size
382     }
383 
384     /// Returns the partition entry structure in the GPT header.
385     pub fn gpt_entry(&self) -> &GptEntry {
386         &self.entry
387     }
388 
389     /// Returns the partition's absolute start/end offset in number of bytes.
390     pub fn absolute_range(&self) -> Result<(u64, u64)> {
391         let start = SafeNum::from(self.entry.first) * self.block_size;
392         let end = (SafeNum::from(self.entry.last) + 1) * self.block_size;
393         Ok((start.try_into()?, end.try_into()?))
394     }
395 
396     /// Checks a given sub range and returns its absolute offset.
397     pub fn check_range(&self, off: u64, size: u64) -> Result<u64> {
398         let off = SafeNum::from(off);
399         let end: u64 = (off + size).try_into()?;
400         match end > self.size()? {
401             true => Err(Error::BadIndex(end as usize)),
402             _ => Ok((off + self.absolute_range()?.0).try_into()?),
403         }
404     }
405 }
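// Editor's note: an illustrative sketch, not part of the original source. For a hypothetical
// partition with `first = 34`, `last = 37` on a disk with 512-byte blocks:
//
//     size()                 == 4 * 512 = 2048 bytes
//     absolute_range()       == (34 * 512, 38 * 512) == (17408, 19456)
//     check_range(0, 2048)   -> Ok(17408)   // whole partition, returns absolute start offset
//     check_range(512, 2048) -> Err(...)    // runs 512 bytes past the end of the partition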
406 
407 /// `PartitionIterator` iterates all GPT partition entries.
408 pub struct PartitionIterator<'a> {
409     entries: &'a [GptEntry],
410     block_size: u64,
411     idx: usize,
412 }
413 
414 impl Iterator for PartitionIterator<'_> {
415     type Item = Partition;
416 
417     fn next(&mut self) -> Option<Self::Item> {
418         let res = self
419             .entries
420             .get(self.idx)
421             .filter(|v| !v.is_null())
422             .map(|v| Partition::new(*v, self.block_size))?;
423         self.idx += 1;
424         Some(res)
425     }
426 }
427 
428 /// Contains result of GPT syncing/restoration.
429 #[derive(Copy, Clone, PartialEq, Debug, Default)]
430 pub enum GptSyncResult {
431     /// Both primary and secondary GPT are valid.
432     #[default]
433     BothValid,
434     /// Primary GPT is invalid and restored.
435     PrimaryRestored(Error),
436     /// Secondary GPT is invalid and restored.
437     SecondaryRestored(Error),
438     /// Neither primary nor secondary GPT is valid.
439     NoValidGpt {
440         /// Primary GPT verify error.
441         primary: Error,
442         /// Secondary GPT verify error.
443         secondary: Error,
444     },
445 }
446 
447 impl GptSyncResult {
448     /// Combines the sync outcome into a single result.
449     pub fn res(&self) -> Result<()> {
450         match self {
451             Self::NoValidGpt { primary: e, .. } => Err(*e),
452             _ => Ok(()),
453         }
454     }
455 }
456 
457 impl core::fmt::Display for GptSyncResult {
458     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
459         match self {
460             Self::BothValid => write!(f, "Found valid GPT."),
461             Self::PrimaryRestored(e) => write!(f, "Primary GPT restored due to {e:?}."),
462             Self::SecondaryRestored(e) => write!(f, "Secondary GPT restored due to {e:?}."),
463             Self::NoValidGpt { primary, secondary } => {
464                 write!(f, "No valid GPT. primary: {primary:?}, secondary: {secondary:?}.")
465             }
466         }
467     }
468 }
469 
470 /// A packed wrapper of `Option<NonZeroU64>`
471 #[repr(C, packed)]
472 #[derive(Debug, Copy, Clone, Immutable, IntoBytes, FromBytes, KnownLayout)]
473 struct BlockSize(Option<NonZeroU64>);
474 
475 /// Represents the structure of a load buffer for loading/verifying/syncing up to N GPT entries.
476 #[repr(C, packed)]
477 #[derive(Debug, Copy, Clone, Immutable, IntoBytes, FromBytes)]
478 pub struct GptLoadBufferN<const N: usize> {
479     // GPT doesn't care about block size. But it's easier to have it available for computing offset
480     // and size in bytes for partitions. It's also used as a flag for indicating whether a valid
481     // GPT is loaded.
482     block_size: BlockSize,
483     primary_header: GptHeader,
484     secondary_header: GptHeader,
485     primary_entries: [GptEntry; N],
486     secondary_entries: [GptEntry; N],
487 }
488 
489 impl<const N: usize> Deref for GptLoadBufferN<N> {
490     type Target = [u8];
491 
492     fn deref(&self) -> &Self::Target {
493         self.as_bytes()
494     }
495 }
496 
497 impl<const N: usize> DerefMut for GptLoadBufferN<N> {
498     fn deref_mut(&mut self) -> &mut Self::Target {
499         self.as_bytes_mut()
500     }
501 }
502 
503 /// Contains references corresponding to different GPT load entities parsed from a load buffer.
504 ///
505 /// The structure is simply for organizing together the individual references of fields in
506 /// `GptLoadBufferN` parsed from a raw buffer. Note that we can't parse a `Ref<B, GptLoadBufferN>`
507 /// directly from a buffer because the number of entries (length of [GptEntry]) in this case needs
508 /// to be computed at run time based on the buffer size.
509 struct LoadBufferRef<B: ByteSlice> {
510     block_size: Ref<B, BlockSize>,
511     primary_header: Ref<B, GptHeader>,
512     secondary_header: Ref<B, GptHeader>,
513     primary_entries: Ref<B, [GptEntry]>,
514     secondary_entries: Ref<B, [GptEntry]>,
515 }
516 
517 impl<B: SplitByteSlice> LoadBufferRef<B> {
518     fn from(buffer: B) -> Self {
519         let n = min(GPT_MAX_NUM_ENTRIES, max_supported_entries(&buffer[..]).unwrap());
520         let (block_size, rest) = Ref::new_from_prefix(buffer).unwrap();
521         let (primary_header, rest) = Ref::new_from_prefix(rest).unwrap();
522         let (secondary_header, rest) = Ref::new_from_prefix(rest).unwrap();
523         let (primary_entries, rest) = Ref::new_slice_from_prefix(rest, n).unwrap();
524         let (secondary_entries, _) = Ref::new_slice_from_prefix(rest, n).unwrap();
525         Self { block_size, primary_header, secondary_header, primary_entries, secondary_entries }
526     }
527 
528     /// Unpacks into the secondary GPT header/entries
529     fn secondary(self) -> (Ref<B, GptHeader>, Ref<B, [GptEntry]>) {
530         (self.secondary_header, self.secondary_entries)
531     }
532 }
533 
534 /// The minimum buffer size needed for creating a [Gpt] that can load `entries` number of
535 /// partitions.
536 ///
537 /// # Returns
538 ///
539 /// * Returns Ok(size) on success.
540 /// * Returns Err(Error::InvalidInput) if `entries` is greater than 128.
541 pub fn gpt_buffer_size(entries: usize) -> Result<usize> {
542     match entries > GPT_MAX_NUM_ENTRIES {
543         true => Err(Error::InvalidInput),
544         _ => Ok(size_of::<GptLoadBufferN<0>>() + entries * GPT_ENTRY_SIZE * 2),
545     }
546 }
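// Editor's note: a worked example (illustrative, not from the original source). With the sizes
// asserted by the tests below (92-byte header, 128-byte entry) plus the 8-byte `BlockSize` field,
// `size_of::<GptLoadBufferN<0>>()` is 8 + 92 * 2 = 192 bytes, so a buffer able to load the full
// 128 entries needs:
//
//     gpt_buffer_size(128) == 192 + 128 * 128 * 2 == 32960 bytes (roughly 32 KiB)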
547 
548 /// Computes the maximum number of entries that can be loaded if using the given buffer for [Gpt].
549 fn max_supported_entries(buf: &[u8]) -> Result<usize> {
550     match buf.len() < size_of::<GptLoadBufferN<0>>() {
551         true => Err(Error::BufferTooSmall(Some(size_of::<GptLoadBufferN<0>>()))),
552         _ => Ok((buf.len() - size_of::<GptLoadBufferN<0>>()) / 2 / GPT_ENTRY_SIZE),
553     }
554 }
555 
556 /// [Gpt] manages a buffer for loading, verifying and syncing GPT.
557 pub struct Gpt<B> {
558     buffer: B,
559 }
560 
561 impl<B: DerefMut<Target = [u8]>> Gpt<B> {
562     /// Create an uninitialized Gpt instance from a provided buffer.
563     ///
564     /// The created [Gpt] can then be used in `Disk::sync_gpt()` for loading, verifying and syncing
565     /// GPT on disk.
566     ///
567     /// # Args:
568     ///
569     /// * `buffer`: A buffer to use for loading, verifying and syncing primary and secondary GPT.
570     ///   The size of the buffer determines the maximum number of partition entries that can be
571     ///   loaded. If the actual number of partitions, specified by `entries_count` in the GPT header,
572     ///   exceeds it, verification and sync will eventually fail with `Error::BufferTooSmall`.
573     ///   `gpt_buffer_size(num_entries)` can be used to compute the required buffer size for
574     ///   loading a specific number of entries. Note that most tools and OSes fix the `entries_count`
575     ///   value to the max 128 regardless of the actual number of partition entries used. Thus,
576     ///   unless you have full control of GPT generation across your entire system and can always
577     ///   guarantee a smaller bound, it is recommended to always provide enough buffer for loading
578     ///   128 entries.
579     ///
580     /// # Returns
581     ///
582     /// * Returns Ok(Self) on success.
583     /// * Returns Err(Error::BufferTooSmall) if buffer is less than the minimum size.
584     pub fn new(mut buffer: B) -> Result<Self> {
585         max_supported_entries(&buffer[..])?;
586         LoadBufferRef::from(&mut buffer[..]).block_size.0 = None;
587         Ok(Self { buffer })
588     }
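    // Editor's note: an illustrative usage sketch, not part of the original source. `disk` is a
    // hypothetical `Disk` instance and error handling is elided:
    //
    //     let mut buf = vec![0u8; gpt_buffer_size(128)?];
    //     let mut gpt = Gpt::new(&mut buf[..])?;
    //     let sync_result = block_on(disk.sync_gpt(&mut gpt))?;
    //     sync_result.res()?; // fails if neither GPT copy was valid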
589 
590     /// Returns the maximum allowed entries.
591     pub fn max_entries(&self) -> usize {
592         max_supported_entries(&self.buffer[..]).unwrap()
593     }
594 
595     /// Creates an instance of `Gpt<&mut [u8]>` that borrows the internal GPT buffer.
596     pub fn as_borrowed(&mut self) -> Gpt<&mut [u8]> {
597         Gpt { buffer: &mut self.buffer[..] }
598     }
599 
600     /// Returns an iterator to GPT partition entries.
601     ///
602     /// If the object does not contain a valid GPT, the method returns Error.
603     pub fn partition_iter(&self) -> Result<PartitionIterator> {
604         let block_size = self.check_valid()?;
605         let entries = LoadBufferRef::from(&self.buffer[..]).primary_entries.into_slice();
606         Ok(PartitionIterator { entries, idx: 0, block_size })
607     }
608 
609     /// Checks if a read/write range into a GPT partition overflows and returns the range's absolute
610     /// offset in number of bytes.
611     pub fn check_range(&self, part_name: &str, offset: u64, size: usize) -> Result<u64> {
612         self.find_partition(part_name)?.check_range(offset, u64::try_from(size)?)
613     }
614 
615     /// Return the list of GPT entries.
616     ///
617     /// If there is not a valid GPT, the method returns Error.
618     pub fn entries(&self) -> Result<&[GptEntry]> {
619         self.check_valid()?;
620         let entries = LoadBufferRef::from(&self.buffer[..]).primary_entries.into_slice();
621         let n = entries.iter().position(|v| v.is_null()).unwrap_or(entries.len());
622         Ok(&entries[..n])
623     }
624 
625     /// Returns the total number of partitions.
626     pub fn num_partitions(&self) -> Result<usize> {
627         Ok(self.entries()?.len())
628     }
629 
630     /// Gets the `idx`th partition.
631     pub fn get_partition(&self, idx: usize) -> Result<Partition> {
632         let block_size = self.check_valid()?;
633         let entry = *self.entries()?.get(idx).ok_or(Error::BadIndex(idx))?;
634         Ok(Partition::new(entry, block_size))
635     }
636 
637     /// Returns the `Partition` for a partition.
638     ///
639     /// # Args
640     ///
641     /// * `part`: Name of the partition.
642     pub fn find_partition(&self, part: &str) -> Result<Partition> {
643         let block_size = self.check_valid()?;
644         for entry in self.entries()? {
645             let mut name_conversion_buffer = [0u8; GPT_NAME_LEN_U16 * 2];
646             if entry.name_to_str(&mut name_conversion_buffer)? != part {
647                 continue;
648             }
649             return Ok(Partition::new(*entry, block_size));
650         }
651         Err(Error::NotFound)
652     }
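    // Editor's note: an illustrative sketch (not part of the original source) of the lookup APIs
    // above, assuming `gpt` has been successfully synced against a disk:
    //
    //     for part in gpt.partition_iter()? {
    //         // `name()` is None only if the stored name failed to decode.
    //         let _ = part.name();
    //     }
    //     let boot_a = gpt.find_partition("boot_a")?;
    //     assert_eq!(boot_a.block_size(), 512); // assuming a 512-byte block disk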
653 
654     /// Checks whether the Gpt has been initialized and returns the block size.
655     fn check_valid(&self) -> Result<u64> {
656         Ok(LoadBufferRef::from(&self.buffer[..]).block_size.0.ok_or(Error::InvalidState)?.get())
657     }
658 
659     /// Helper function for loading and validating GPT header and entries.
660     async fn load_and_validate_gpt(
661         &mut self,
662         disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
663         hdr_type: HeaderType,
664     ) -> Result<()> {
665         let blk_sz = disk.io().info().block_size;
666         let load = LoadBufferRef::from(&mut self.buffer[..]);
667         let (header_start, mut header, mut entries) = match hdr_type {
668             HeaderType::Primary => (blk_sz, load.primary_header, load.primary_entries),
669             HeaderType::Secondary => (
670                 ((SafeNum::from(disk.io().info().num_blocks) - 1) * blk_sz).try_into()?,
671                 load.secondary_header,
672                 load.secondary_entries,
673             ),
674         };
675 
676         // Loads the header
677         disk.read(header_start, Ref::bytes_mut(&mut header)).await?;
678         // Checks header.
679         check_header(disk.io(), &header, matches!(hdr_type, HeaderType::Primary))?;
680         // Loads the entries.
681         let entries_size = SafeNum::from(header.entries_count) * GPT_ENTRY_SIZE;
682         let entries_offset = SafeNum::from(header.entries) * blk_sz;
683         let out = entries.as_bytes_mut().get_mut(..entries_size.try_into().unwrap()).ok_or(
684             Error::BufferTooSmall(Some(
685                 gpt_buffer_size(header.entries_count.try_into().unwrap()).unwrap(),
686             )),
687         )?;
688         disk.read(entries_offset.try_into().unwrap(), out).await?;
689         // Checks entries.
690         check_entries(&header, entries.as_bytes())
691     }
692 
693     /// Loads and syncs GPT from a block device.
694     ///
695     /// * Returns Ok(sync_result) if disk IO is successful, where `sync_result` contains the GPT
696     ///   verification and restoration result,
697     /// * Returns Err() if disk IO encounters error.
698     pub(crate) async fn load_and_sync(
699         &mut self,
700         disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
701     ) -> Result<GptSyncResult> {
702         let blk_sz = disk.io().info().block_size;
703         let nonzero_blk_sz = NonZeroU64::new(blk_sz).ok_or(Error::InvalidInput)?;
704         let total_blocks: SafeNum = disk.io().info().num_blocks.into();
705 
706         let primary_header_blk = 1;
707         let primary_header_pos = blk_sz;
708         let secondary_header_blk = total_blocks - 1;
709 
710         // Entries position for restoring.
711         let primary_entries_blk = 2;
712         let primary_entries_pos = SafeNum::from(primary_entries_blk) * blk_sz;
713         let primary_res = self.load_and_validate_gpt(disk, HeaderType::Primary).await;
714         let secondary_res = self.load_and_validate_gpt(disk, HeaderType::Secondary).await;
715 
716         let LoadBufferRef {
717             mut block_size,
718             mut primary_header,
719             mut secondary_header,
720             mut primary_entries,
721             mut secondary_entries,
722         } = LoadBufferRef::from(&mut self.buffer[..]);
723         block_size.0 = None;
724         let primary_entries = primary_entries.as_bytes_mut();
725         let secondary_entries = secondary_entries.as_bytes_mut();
726         let sync_res = match (primary_res, secondary_res) {
727             (Err(primary), Err(secondary)) => GptSyncResult::NoValidGpt { primary, secondary },
728             (Ok(()), Ok(())) if is_consistent(&primary_header, &secondary_header) => {
729                 GptSyncResult::BothValid
730             }
731             (Err(e), Ok(())) => {
732                 // Restores to primary
733                 primary_header.as_bytes_mut().clone_from_slice(secondary_header.as_bytes());
734                 primary_entries.clone_from_slice(&secondary_entries);
735                 primary_header.current = primary_header_blk;
736                 primary_header.backup = secondary_header_blk.try_into()?;
737                 primary_header.entries = primary_entries_blk;
738                 primary_header.update_crc();
739 
740                 disk.write(primary_header_pos, primary_header.as_bytes_mut()).await?;
741                 disk.write(primary_entries_pos.try_into()?, primary_entries).await?;
742                 GptSyncResult::PrimaryRestored(e)
743             }
744             (Ok(()), v) => {
745                 // Restores to secondary
746                 let pos = secondary_header_blk * blk_sz;
747                 let secondary_entries_pos = pos - GPT_MAX_NUM_ENTRIES_SIZE;
748                 let secondary_entries_blk = secondary_entries_pos / blk_sz;
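                // Editor's note (illustrative, not from the original source): the secondary
                // entries array is placed in the last `GPT_MAX_NUM_ENTRIES_SIZE` bytes
                // immediately before the secondary header, mirroring the standard GPT backup
                // layout at the end of the disk.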
749 
750                 secondary_header.as_bytes_mut().clone_from_slice(primary_header.as_bytes());
751                 secondary_entries.clone_from_slice(primary_entries);
752                 secondary_header.current = secondary_header_blk.try_into()?;
753                 secondary_header.backup = primary_header_blk;
754                 secondary_header.entries = secondary_entries_blk.try_into()?;
755                 secondary_header.update_crc();
756 
757                 disk.write(pos.try_into()?, secondary_header.as_bytes_mut()).await?;
758                 disk.write(secondary_entries_pos.try_into()?, secondary_entries).await?;
759 
760                 GptSyncResult::SecondaryRestored(match v {
761                     Err(e) => e,
762                     _ => Error::GptError(GptError::DifferentFromPrimary),
763                 })
764             }
765         };
766 
767         block_size.0 = Some(nonzero_blk_sz);
768         Ok(sync_res)
769     }
770 }
771 
772 /// Checks whether the primary and secondary headers are consistent with each other.
773 fn is_consistent(primary: &GptHeader, secondary: &GptHeader) -> bool {
774     let mut expected_secondary = *primary;
775     expected_secondary.crc32 = secondary.crc32;
776     expected_secondary.current = secondary.current;
777     expected_secondary.backup = 1;
778     expected_secondary.entries = secondary.entries;
779     &expected_secondary == secondary
780 }
781 
782 /// A [Gpt] that owns a `GptLoadBufferN<N>` and can load up to N partition entries.
783 ///
784 /// Note: The size of this type increases with N and can be expensive to store on stack. It is
785 /// typically intended for resource abundant environment such as test.
786 pub type GptN<const N: usize> = Gpt<GptLoadBufferN<N>>;
787 
788 /// Creates an instance of GptN.
789 pub fn new_gpt_n<const N: usize>() -> GptN<N> {
790     Gpt::new(GptLoadBufferN::<N>::new_zeroed()).unwrap()
791 }
792 
793 /// A [Gpt] that owns a `GptLoadBufferN<128>` and can load the maximum 128 partition entries.
794 ///
795 /// Note: The size of this type is approximately 34K and can be expensive to store on stack. It
796 /// is typically intended for resource abundant environment such as test.
797 pub type GptMax = GptN<GPT_MAX_NUM_ENTRIES>;
798 
799 /// Creates an instance of GptMax.
800 pub fn new_gpt_max() -> GptMax {
801     new_gpt_n::<GPT_MAX_NUM_ENTRIES>()
802 }
803 
804 /// Updates GPT on a block device.
805 ///
806 /// # Args
807 ///
808 /// * `disk`: The [Disk] to update; it provides the [BlockIo] implementation and the scratch
809 ///   buffer for unaligned reads/writes.
810 /// * `mbr_primary`: A buffer containing the MBR block, primary GPT header and entries.
811 /// * `resize`: If set to true, the method updates the last partition to cover the rest of the
812 ///    storage.
813 /// * `gpt`: The output [Gpt] to update.
814 pub(crate) async fn update_gpt(
815     disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
816     mbr_primary: &mut [u8],
817     resize: bool,
818     gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
819 ) -> Result<()> {
820     let blk_sz: usize = disk.io().info().block_size.try_into()?;
821     let (header, remain) = mbr_primary
822         .get_mut(blk_sz..)
823         .map(|v| v.split_at_mut_checked(blk_sz))
824         .flatten()
825         .ok_or(Error::BufferTooSmall(Some(blk_sz * 2)))?;
826     let header = Ref::into_mut(Ref::<_, GptHeader>::new_from_prefix(&mut header[..]).unwrap().0);
827 
828     // Adjusts last usable block according to this device in case the GPT was generated for a
829     // different disk size. If this results in some partition being out of range, it will be
830     // caught during `check_header()`.
831     let entries_blk = SafeNum::from(GPT_MAX_NUM_ENTRIES_SIZE) / blk_sz;
832     // Reserves only secondary GPT header and entries.
833     let num_blks = SafeNum::from(disk.io().info().num_blocks);
834     header.last = (num_blks - entries_blk - 2).try_into().unwrap();
835     header.backup = (num_blks - 1).try_into().unwrap();
836     header.update_crc();
837 
838     check_header(disk.io(), &header, true)?;
839     // Computes entries offset in bytes relative to `remain`
840     let entries_off: usize = ((SafeNum::from(header.entries) - 2) * blk_sz).try_into().unwrap();
841     let entries_size: usize =
842         (SafeNum::from(header.entries_count) * header.entries_size).try_into().unwrap();
843     let entries = remain
844         .get_mut(entries_off..)
845         .map(|v| v.get_mut(..entries_size))
846         .flatten()
847         .ok_or(Error::BufferTooSmall(Some(2 * blk_sz + entries_off + entries_size)))?;
848     check_entries(&header, entries)?;
849 
850     if resize {
851         // Updates the last entry to cover the rest of the storage.
852         let gpt_entries =
853             Ref::<_, [GptEntry]>::new_slice(&mut entries[..]).unwrap().into_mut_slice();
854         gpt_entries.iter_mut().filter(|e| !e.is_null()).last().map(|v| v.last = header.last);
855         header.update_entries_crc(entries);
856         // Re-verifies everything.
857         check_header(disk.io(), &header, true).unwrap();
858         check_entries(&header, entries).unwrap();
859     }
860 
861     disk.write(0, mbr_primary).await?;
862     disk.sync_gpt(gpt).await?.res()
863 }
864 
865 /// Erases GPT if there is one on the device.
866 pub(crate) async fn erase_gpt(
867     disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
868     gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
869 ) -> Result<()> {
870     match disk.sync_gpt(gpt).await?.res() {
871         Err(_) => Ok(()), // No valid GPT. Nothing to erase.
872         _ => {
873             let blk_sz = disk.block_info().block_size;
874             let mut load = LoadBufferRef::from(&mut gpt.buffer[..]);
875             let entries_size = SafeNum::from(load.primary_header.entries_count) * GPT_ENTRY_SIZE;
876             let scratch = load.primary_entries.as_bytes_mut();
877             // Invalidate GPT first.
878             load.block_size.0 = None;
879             // Erases primary header/entries.
880             let header = load.primary_header.current;
881             let entries = load.primary_header.entries;
882             disk.fill(header * blk_sz, blk_sz, 0, scratch).await?;
883             disk.fill(entries * blk_sz, entries_size.try_into().unwrap(), 0, scratch).await?;
884             // Erases secondary header/entries.
885             let header = load.secondary_header.current;
886             let entries = load.secondary_header.entries;
887             disk.fill(header * blk_sz, blk_sz, 0, scratch).await?;
888             disk.fill(entries * blk_sz, entries_size.try_into().unwrap(), 0, scratch).await?;
889             Ok(())
890         }
891     }
892 }
893 
894 /// Computes the minimum blocks needed for creating a GPT.
895 fn min_required_blocks(block_size: u64) -> Result<u64> {
896     // MBR + primary/secondary GPT header block + primary/secondary entries blocks.
897     Ok(1 + (1 + gpt_entries_blk(block_size)?) * 2)
898 }
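// Editor's note: worked numbers for the formula above (illustrative, not from the original
// source):
//
//     min_required_blocks(512)  == 1 + (1 + 32) * 2 == 67 blocks
//     min_required_blocks(4096) == 1 + (1 + 4) * 2  == 11 blocks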
899 
900 /// `GptBuilder` provides APIs for creating/modifying the GPT partition table on a disk.
901 pub struct GptBuilder<D, G> {
902     disk: D,
903     gpt: G,
904 }
905 
906 impl<D: Debug, G: Debug> Debug for GptBuilder<D, G> {
907     fn fmt(&self, f: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
908         write!(f, "GptBuilder {{ disk: {:?}, gpt: {:?} }}", self.disk, self.gpt)
909     }
910 }
911 // Generic parameters:
912 //
913 // * T: The type that implements BlockIo.
914 // * S: The type for the scratch buffer in `Self::disk`.
915 // * B: The type for the GPT buffer in `Self::gpt`.
916 // * D: The type for `Self::disk` which can dereference to a Disk<T, S>.
917 // * G: The type for `Self::gpt` which can dereference to a Gpt<B>.
918 impl<'a, T, S, B, D, G> GptBuilder<D, G>
919 where
920     T: BlockIo,
921     S: DerefMut<Target = [u8]>,
922     B: DerefMut<Target = [u8]>,
923     D: DerefMut<Target = Disk<T, S>>,
924     G: DerefMut<Target = Gpt<B>>,
925 {
926     /// Creates a new instance.
927     ///
928     /// The method always re-syncs the GPT. If `disk` does not contain a valid GPT, a new GPT is
929     /// started from scratch.
930     ///
931     /// The partition entries will always be sorted when writing back to disk by `Self::persist()`.
932     ///
933     /// # Returns
934     ///
935     /// * Returns Ok((Self, true)) if an instance is created and the disk has a valid GPT.
936     /// * Returns Ok((Self, false)) if an instance is created but disk does not have a valid GPT.
937     /// * Returns Err() otherwise.
938     pub fn new(mut disk: D, mut gpt: G) -> Result<(Self, bool)> {
939         if disk.block_info().num_blocks < min_required_blocks(disk.block_info().block_size)? {
940             return Err(Error::GptError(GptError::DiskTooSmall));
941         }
942         let has_valid_gpt = block_on(disk.sync_gpt(&mut gpt))?.res().is_ok();
943         // Uses the buffer for secondary GPT header/entries as construction buffer, as it is not
944         // used by Gpt once loaded and synced.
945         let (mut header, mut entries) = LoadBufferRef::from(&mut gpt.buffer[..]).secondary();
946         if !has_valid_gpt {
947             header.as_bytes_mut().fill(0);
948             entries.as_bytes_mut().fill(0);
949             let entries_blk = gpt_entries_blk(disk.block_info().block_size).unwrap();
950             // Initializes a secondary header.
951             let num_blks = SafeNum::from(disk.block_info().num_blocks);
952             header.magic = GPT_MAGIC;
953             header.current = (num_blks - 1).try_into().unwrap();
954             header.backup = 1;
955             header.size = size_of::<GptHeader>().try_into().unwrap();
956             header.first = 1 + 1 + entries_blk; // MBR + GPT header blocks + entries block
957             header.last = (num_blks - 1 - entries_blk - 1).try_into().unwrap();
958             header.entries = (num_blks - 1 - entries_blk).try_into().unwrap();
959             header.entries_count = 0;
960             header.entries_size = size_of::<GptEntry>().try_into().unwrap();
961         }
962         // Normalizes `entries_count` to the actual number of valid entries. Some GPT disks fix
963         // `entries_count` to 128.
964         header.entries_count =
965             entries.iter().position(|v| v.is_null()).unwrap_or(entries.len()).try_into().unwrap();
966         entries.sort_unstable_by_key(|v| match v.is_null() {
967             true => u64::MAX,
968             _ => v.first,
969         });
970         Ok((Self { disk, gpt }, has_valid_gpt))
971     }
972 
973     /// Removes a partition.
974     ///
975     /// # Returns
976     ///
977     /// * Returns Ok(true) if found and removed.
978     /// * Returns Ok(false) if not found.
979     /// * Returns Err() otherwise.
980     pub fn remove(&mut self, part: &str) -> Result<bool> {
981         let (mut header, mut entries) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
982         let entries = &mut entries[..header.entries_count.try_into().unwrap()];
983         match entries.iter().position(|v| v.match_name(part).unwrap_or(false)) {
984             Some(n) => {
985                 // Shift the elements behind forward.
986                 entries[n..].rotate_left(1);
987                 // Zeroizes the last element.
988                 entries.last_mut().unwrap().as_bytes_mut().fill(0);
989                 header.entries_count -= 1;
990                 Ok(true)
991             }
992             _ => Ok(false),
993         }
994     }
995 
996     /// Inserts a new partition before a partition.
997     ///
998     /// # Args
999     ///
1000     /// * `idx`: Index of the partition to insert before. If the index is out of range of the valid
1001     ///   entries, the partition will be inserted at the end.
1002     /// * `name`: Name of the partition.
1003     /// * `part_type`: Type GUID.
1004     /// * `unique_guid`: Unique GUID.
1005     /// * `flags`: Partition flag.
1006     /// * `size`: If Some(_), specifies the size in number of bytes for the partition. The method
1007     ///   will round it up to multiple of disk block size and check that there is enough space for
1008     ///   the partition. If None, the method will insert the partition and consume all the
1009     ///   available space in between.
1010     fn insert_before(
1011         &mut self,
1012         idx: usize,
1013         name: &str,
1014         part_type: [u8; GPT_GUID_LEN],
1015         unique_guid: [u8; GPT_GUID_LEN],
1016         flags: u64,
1017         size: Option<u64>,
1018     ) -> Result<()> {
1019         let (mut header, mut entries) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
1020         // Gets position to the first NULL entry.
1021         let n = entries.iter().position(|v| v.is_null()).ok_or(Error::OutOfResources)?;
1022         let entries = &mut entries[..n + 1];
1023         // Caps `idx` to no more than the first NULL entry.
1024         let idx = min(n, idx);
1025         // Computes the ending block index (non-inclusive) of the previous partition entry.
1026         // Entries are guaranteed sorted in `Self::new()`.
1027         let prev_end = match idx {
1028             0 => header.first,
1029             _ => entries[idx - 1].last + 1,
1030         };
1031         // Computes the starting block index (inclusive) of the next partition entry.
1032         let next_start = match idx == n {
1033             true => header.last + 1,
1034             _ => entries[idx].first,
1035         };
1036         // Computes the size in number of blocks
1037         let blk_sz = self.disk.block_info().block_size;
1038         let blocks: u64 = match size {
1039             Some(v) => (SafeNum::from(v).round_up(blk_sz) / blk_sz).try_into()?,
1040             _ => next_start - prev_end, // If not given, uses up all the gap space
1041         };
1042         // Checks if there is enough space.
1043         if next_start - prev_end < blocks {
1044             return Err(Error::OutOfResources);
1045         }
1046         // Inserts the new entry.
1047         entries[idx..].rotate_right(1);
1048         let entry = &mut entries[idx];
1049         assert!(entry.is_null());
1050         entry.part_type = part_type;
1051         entry.guid = unique_guid;
1052         entry.flags = flags;
1053         entry.first = prev_end;
1054         entry.last = prev_end + blocks - 1;
1055         for (idx, ele) in name.encode_utf16().enumerate() {
1056             match idx < GPT_NAME_LEN_U16 {
1057                 true => entry.name[idx] = ele,
1058                 _ => break,
1059             }
1060         }
1061         header.entries_count += 1;
1062         Ok(())
1063     }
1064 
1065     /// Adds a partition.
1066     ///
1067     /// # Args
1068     ///
1069     /// * `name`: Name of the partition.
1070     /// * `part_type`: Type GUID.
1071     /// * `unique_guid`: Unique GUID.
1072     /// * `flags`: Partition flag.
1073     /// * `size`: If Some(_), specifies the size in number of bytes for the partition. The method
1074     ///   will round it up to a multiple of the disk block size and search for the first large enough
1075     ///   space in the unused space for putting the partition. If None, the method will add the
1076     ///   partition at the end and have it consume all remaining usable disk space.
1077     pub fn add(
1078         &mut self,
1079         name: &str,
1080         part_type: [u8; GPT_GUID_LEN],
1081         unique_guid: [u8; GPT_GUID_LEN],
1082         flags: u64,
1083         size: Option<u64>,
1084     ) -> Result<()> {
1085         let (header, _) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
1086         let entry_count = usize::try_from(header.entries_count).unwrap();
1087         let search_start = size.is_some().then_some(0).unwrap_or(entry_count);
1088         for i in search_start..entry_count + 1 {
1089             if self.insert_before(i, name, part_type, unique_guid, flags, size).is_ok() {
1090                 return Ok(());
1091             }
1092         }
1093         Err(Error::OutOfResources)
1094     }
1095 
1096     /// Persists the constructed GPT table to the disk and syncs. The builder is consumed.
1097     pub async fn persist(mut self) -> Result<()> {
1098         let (mut header, mut entries) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
1099         header.update_entries_crc(entries.as_bytes());
1100         // Check validity. Should not fail if implementation is correct.
1101         check_header(self.disk.io(), &header, false).unwrap();
1102         check_entries(&header, entries.as_bytes()).unwrap();
1103         let blk_sz = self.disk.block_info().block_size;
1104         // Writes to secondary header/entries.
1105         self.disk.write(header.current * blk_sz, header.as_bytes_mut()).await?;
1106         self.disk.write(header.entries * blk_sz, entries.as_bytes_mut()).await?;
1107         // Clears primary header magic
1108         self.disk.write(blk_sz, &mut 0u64.to_be_bytes()).await?;
1109         // Re-syncs GPT
1110         self.disk.sync_gpt(&mut self.gpt).await?.res()
1111     }
1112 }
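// Editor's note: an illustrative end-to-end sketch of `GptBuilder`, not part of the original
// source. `disk` is a hypothetical `Disk`, `gpt` a `GptMax`, and the GUID constants are
// placeholders of type `[u8; GPT_GUID_LEN]`:
//
//     let (mut builder, _had_valid_gpt) = GptBuilder::new(&mut disk, &mut gpt)?;
//     builder.add("boot_a", BOOT_TYPE_GUID, UNIQUE_GUID_A, 0, Some(64 * 1024))?;
//     builder.add("userdata", DATA_TYPE_GUID, UNIQUE_GUID_B, 0, None)?; // takes remaining space
//     block_on(builder.persist())?;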
1113 
1114 /// Helper for calculating the CRC32.
1115 fn crc32(data: &[u8]) -> u32 {
1116     let mut hasher = Hasher::new();
1117     hasher.update(data);
1118     hasher.finalize()
1119 }
1120 
1121 #[cfg(test)]
1122 pub(crate) mod test {
1123     use super::*;
1124     use crate::test::TestDisk;
1125     use gbl_async::block_on;
1126 
1127     /// A helper for creating a [TestDisk] from given data.
1128     fn test_disk(data: impl AsRef<[u8]>) -> TestDisk {
1129         // All test cases use a pre-generated GPT disk with 512-byte blocks.
1130         TestDisk::new_ram_alloc(512, 512, data.as_ref().to_vec()).unwrap()
1131     }
1132 
1133     /// A helper for creating a [TestDisk] from given data and a [Gpt] for 128 entries.
1134     fn test_disk_and_gpt(data: impl AsRef<[u8]>) -> (TestDisk, GptMax) {
1135         (test_disk(data), new_gpt_max())
1136     }
1137 
1138     #[test]
1139     fn test_load_and_sync() {
1140         let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1141         block_on(dev.sync_gpt(&mut gpt)).unwrap();
1142 
1143         assert_eq!(gpt.partition_iter().unwrap().count(), 2);
1144         gpt.find_partition("boot_a").unwrap();
1145         gpt.find_partition("boot_b").unwrap();
1146         assert!(gpt.find_partition("boot_c").is_err());
1147 
1148         // Creating a new [Gpt] using the same buffer should reset the valid state.
1149         let gpt = Gpt::new(gpt.buffer).unwrap();
1150         assert!(gpt.partition_iter().is_err());
1151         assert!(gpt.find_partition("boot_a").is_err());
1152         assert!(gpt.find_partition("boot_b").is_err());
1153     }
1154 
1155     #[test]
1156     fn test_load_with_unaligned_buffer() {
1157         #[repr(align(8))]
1158         struct AlignedBuffer([u8; 34 * 1024]);
1159         let mut buffer = AlignedBuffer([0u8; 34 * 1024]);
1160         let buffer = &mut buffer.0[1..];
1161         assert_ne!(buffer.as_ptr() as usize % 2, 0);
1162         let mut disk = test_disk(include_bytes!("../test/gpt_test_1.bin"));
1163         let mut gpt = Gpt::new(buffer).unwrap();
1164         block_on(disk.sync_gpt(&mut gpt)).unwrap();
1165     }
1166 
1167     #[test]
1168     fn test_gpt_buffer_too_small() {
1169         assert!(Gpt::new(vec![0u8; size_of::<GptLoadBufferN<0>>() - 1]).is_err());
1170     }
1171 
1172     #[test]
1173     fn test_gpt_buffer_not_enough_for_all_entries() {
1174         let mut dev = test_disk(include_bytes!("../test/gpt_test_1.bin"));
1175         let mut gpt = new_gpt_n::<127>();
1176         assert_eq!(gpt.max_entries(), 127);
1177         // Actual entries_count is 128 in the GPT.
1178         assert!(block_on(dev.sync_gpt(&mut gpt)).unwrap().res().is_err());
1179     }
1180 
1181     #[test]
1182     fn test_good_gpt_no_repair_write() {
1183         let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1184         assert_eq!(block_on(dev.sync_gpt(&mut gpt)).unwrap(), GptSyncResult::BothValid);
1185     }
1186 
1187     /// A helper for testing restoration of invalid primary/secondary header modified by caller.
1188     fn test_gpt_sync_restore<'a>(
1189         modify_primary: impl FnOnce(&mut GptHeader, Ref<&mut [u8], [GptEntry]>),
1190         modify_secondary: impl FnOnce(&mut GptHeader, Ref<&mut [u8], [GptEntry]>),
1191         expect_primary_err: Error,
1192         expect_secondary_err: Error,
1193     ) {
1194         let disk_orig = include_bytes!("../test/gpt_test_1.bin");

        // Restores from secondary to primary.
        let mut disk = disk_orig.to_vec();
        let (header, entries) = (&mut disk[512..]).split_at_mut(512);
        let mut header = GptHeader::from_bytes_mut(header);
        modify_primary(&mut header, Ref::<_, [GptEntry]>::new_slice(entries).unwrap());
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        assert_ne!(dev.io().storage(), disk_orig);
        let sync_res = block_on(dev.sync_gpt(&mut gpt)).unwrap();
        assert_eq!(sync_res, GptSyncResult::PrimaryRestored(expect_primary_err));
        assert_eq!(dev.io().storage(), disk_orig);

        // Restores from primary to secondary.
        let mut disk = disk_orig.to_vec();
        let (entries, header) = (&mut disk[512..]).split_last_chunk_mut::<512>().unwrap();
        let (_, entries) = entries.split_last_chunk_mut::<{ 512 * 32 }>().unwrap();
        let mut header = GptHeader::from_bytes_mut(&mut header[..]);
        modify_secondary(&mut header, Ref::<_, [GptEntry]>::new_slice(&mut entries[..]).unwrap());
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        assert_ne!(dev.io().storage(), disk_orig);
        let sync_res = block_on(dev.sync_gpt(&mut gpt)).unwrap();
        assert_eq!(sync_res, GptSyncResult::SecondaryRestored(expect_secondary_err));
        assert_eq!(dev.io().storage(), disk_orig);
    }

    #[test]
    fn test_sync_gpt_incorrect_magic() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.magic = 0x123456;
            hdr.update_crc();
        }
        let err = Error::GptError(GptError::IncorrectMagic(0x123456));
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_incorrect_crc() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.crc32 = !hdr.crc32;
        }
        let err = Error::GptError(GptError::IncorrectHeaderCrc);
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_unexpected_header_size() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.size += 1;
            hdr.update_crc();
        }
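        // 92 is the size of `GptHeader` in bytes; the corrupted header now reports 93.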
        let err = Error::GptError(GptError::UnexpectedHeaderSize { actual: 93, expect: 92 });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_unexpected_entry_size() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.entries_size += 1;
            hdr.update_crc();
        }
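        // A GPT partition entry is expected to be 128 bytes; the corrupted header reports 129.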
        let err = Error::GptError(GptError::UnexpectedEntrySize { actual: 129, expect: 128 });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_first_usable_gt_last() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.first = hdr.last;
            hdr.last = hdr.first - 2;
            hdr.update_crc();
        }
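        // The usable block range of the test image is [34, 94]; the swap above yields first = 94
        // and last = 92.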
        let err = Error::GptError(GptError::InvalidFirstLastUsableBlock {
            first: 94,
            last: 92,
            range: (34, 94),
        });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_first_usable_out_of_range() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.first = 33;
            hdr.update_crc();
        }
        let err = Error::GptError(GptError::InvalidFirstLastUsableBlock {
            first: 33,
            last: 94,
            range: (34, 94),
        });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_last_usable_out_of_range() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.last += 1;
            hdr.update_crc();
        }
        let err = Error::GptError(GptError::InvalidFirstLastUsableBlock {
            first: 34,
            last: 95,
            range: (34, 94),
        });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_primary_entries_out_of_range() {
        test_gpt_sync_restore(
            |hdr, _| {
                hdr.entries = 1;
                hdr.update_crc();
            },
            |hdr, _| {
                hdr.entries = hdr.last;
                hdr.update_crc();
            },
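            // Primary entries must start immediately after the primary header (block 2), and
            // secondary entries immediately after the last usable block (block 95).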
            Error::GptError(GptError::InvalidPrimaryEntriesStart {
                value: 1,
                expect_range: (2, 2),
            }),
            Error::GptError(GptError::InvalidSecondaryEntriesStart {
                value: 94,
                expect_range: (95, 95),
            }),
        );
    }

    #[test]
    fn test_sync_gpt_incorrect_entry_crc() {
        fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
            hdr.entries_crc = !hdr.entries_crc;
            hdr.update_crc();
        }
        let err = Error::GptError(GptError::IncorrectEntriesCrc);
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_partition_range_overflow() {
        fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
            entries[1].last = hdr.last + 1;
            hdr.update_entries_crc(entries.as_bytes());
        }
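        // Entry 1 ("boot_b") now ends at block 95, one past the last usable block 94; the error
        // reports `idx` 1-based.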
        let err = Error::GptError(GptError::InvalidPartitionRange {
            idx: 2,
            part_range: (50, 95),
            usable_range: (34, 94),
        });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_invalid_partition_range() {
        fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
            entries[1].first = entries[1].last;
            entries[1].last = entries[1].first - 2;
            hdr.update_entries_crc(entries.as_bytes());
        }
        let err = Error::GptError(GptError::InvalidPartitionRange {
            idx: 2,
            part_range: (73, 71),
            usable_range: (34, 94),
        });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_partition_overlap() {
        fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
            entries[0].last = entries[1].first;
            entries.swap(0, 1);
            hdr.update_entries_crc(entries.as_bytes());
        }
        let err = Error::GptError(GptError::PartitionRangeOverlap {
            prev: (2, 34, 50),
            next: (1, 50, 73),
        });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_zero_partition_type_guid() {
        fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
            entries[1].part_type = [0u8; GPT_GUID_LEN];
            hdr.update_entries_crc(entries.as_bytes());
        }
        let err = Error::GptError(GptError::ZeroPartitionTypeGUID { idx: 2 });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_sync_gpt_zero_partition_unique_guid() {
        fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
            entries[1].guid = [0u8; GPT_GUID_LEN];
            hdr.update_entries_crc(entries.as_bytes());
        }
        let err = Error::GptError(GptError::ZeroPartitionUniqueGUID { idx: 2 });
        test_gpt_sync_restore(modify, modify, err, err);
    }

    #[test]
    fn test_load_gpt_disk_primary_override_secondary() {
        let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
        // Modifies secondary header.
        let secondary_hdr = GptHeader::from_bytes_mut(disk.last_chunk_mut::<512>().unwrap());
        secondary_hdr.revision = !secondary_hdr.revision;
        secondary_hdr.update_crc();
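        // The secondary header is still internally consistent but no longer matches the primary,
        // so sync is expected to restore it from the primary.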
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        assert_eq!(
            block_on(dev.sync_gpt(&mut gpt)).unwrap(),
            GptSyncResult::SecondaryRestored(Error::GptError(GptError::DifferentFromPrimary)),
        );
    }

    #[test]
    fn test_load_gpt_disk_too_small() {
        let disk_orig = include_bytes!("../test/gpt_test_1.bin");
        let mut disk = disk_orig.to_vec();
        // Resizes the disk so that it cannot hold two full copies of header plus the maximum 128
        // entries: MBR + (header + entries) * 2 - 1 blocks.
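        // That is (1 + 33 * 2 - 1) = 66 blocks, one block short of the 67-block minimum.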
        disk.resize((1 + (32 + 1) * 2 - 1) * 512, 0);
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        let sync_res = block_on(dev.sync_gpt(&mut gpt)).unwrap();
        let err = Error::GptError(GptError::DiskTooSmall);
        assert_eq!(sync_res, GptSyncResult::NoValidGpt { primary: err, secondary: err });
    }

    #[test]
    fn test_uninitialized_gpt() {
        let disk = include_bytes!("../test/gpt_test_1.bin");
        // Load a good GPT first.
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        assert_eq!(block_on(dev.sync_gpt(&mut gpt)).unwrap(), GptSyncResult::BothValid);
        gpt.find_partition("boot_a").unwrap();
        // Corrupt GPT.
        block_on(dev.write(0, &mut vec![0u8; disk.len()])).unwrap();
        assert!(block_on(dev.sync_gpt(&mut gpt)).unwrap().res().is_err());
        assert!(gpt.find_partition("").is_err());
    }

    #[test]
    fn test_update_gpt() {
        let disk_orig = include_bytes!("../test/gpt_test_1.bin");
        let mut disk = disk_orig.to_vec();
        // Erases all GPT headers.
        disk[512..][..512].fill(0);
        disk.last_chunk_mut::<512>().unwrap().fill(0);

        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);

        assert_ne!(dev.io().storage(), disk_orig);
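        // `mbr_primary` holds the first 34 blocks of the original image: the MBR, the primary
        // header and the 32 entry blocks.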
        let mut mbr_primary = disk_orig[..34 * 512].to_vec();
        block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)).unwrap();
        assert_eq!(dev.io().storage(), disk_orig);
    }

    #[test]
    fn test_update_gpt_has_existing_valid_secondary() {
        let disk_orig = include_bytes!("../test/gpt_test_1.bin");
        let mut disk = disk_orig.to_vec();
        // Erases all GPT headers.
        disk[512..][..512].fill(0);
        // Leaves a valid but different secondary GPT.
        let secondary_hdr = GptHeader::from_bytes_mut(disk.last_chunk_mut::<512>().unwrap());
        secondary_hdr.revision = !secondary_hdr.revision;
        secondary_hdr.update_crc();

        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);

        assert_ne!(dev.io().storage(), disk_orig);
        let mut mbr_primary = disk_orig[..34 * 512].to_vec();
        block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)).unwrap();
        assert_eq!(dev.io().storage(), disk_orig);
    }

    #[test]
    fn test_update_gpt_last_usable_adjusted() {
        let disk_orig = include_bytes!("../test/gpt_test_1.bin");
        let mut disk = disk_orig.to_vec();
        // Erases all GPT headers.
        disk[512..][..512].fill(0);
        disk.last_chunk_mut::<512>().unwrap().fill(0);
        // Doubles the disk size.
        disk.resize(disk_orig.len() * 2, 0);

        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);

        assert_ne!(dev.io().storage, disk_orig);
        let mut mbr_primary = disk_orig[..34 * 512].to_vec();
        block_on(dev.update_gpt(&mut mbr_primary, true, &mut gpt)).unwrap();
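        // Expected last usable block: total blocks, minus the backup entries array
        // (GPT_MAX_NUM_ENTRIES_SIZE bytes), minus the backup header block, minus one more to get
        // an inclusive index.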
        let expected_last = (disk.len() - GPT_MAX_NUM_ENTRIES_SIZE - 512) / 512 - 1;

        let (primary, secondary) = dev.io().storage().split_last_chunk_mut::<512>().unwrap();
        let primary_hdr = GptHeader::from_bytes_mut(&mut primary[512..]);
        let secondary_hdr = GptHeader::from_bytes_mut(secondary);
        // Header's last usable block is updated.
        assert_eq!({ primary_hdr.last }, expected_last.try_into().unwrap());
        assert_eq!({ primary_hdr.backup }, (disk.len() / 512 - 1).try_into().unwrap());
        assert_eq!({ secondary_hdr.last }, expected_last.try_into().unwrap());
    }

    #[test]
    fn test_update_gpt_resize() {
        let disk_orig = include_bytes!("../test/gpt_test_1.bin");
        let mut disk = disk_orig.to_vec();
        // Erases all GPT headers.
        disk[512..][..512].fill(0);
        disk.last_chunk_mut::<512>().unwrap().fill(0);
        // Doubles the disk size.
        disk.resize(disk_orig.len() * 2, 0);

        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);

        assert_ne!(dev.io().storage, disk_orig);
        let mut mbr_primary = disk_orig[..34 * 512].to_vec();
        block_on(dev.update_gpt(&mut mbr_primary, true, &mut gpt)).unwrap();
        // Last entry is extended.
        let expected_last = (disk.len() - GPT_MAX_NUM_ENTRIES_SIZE - 512) / 512 - 1;
        assert_eq!({ gpt.entries().unwrap()[1].last }, expected_last.try_into().unwrap());
    }

    #[test]
    fn test_update_gpt_new_partition_out_of_range() {
        // `gpt_test_1.bin` has an 8k "boot_a" and a 12k "boot_b". Thus the partition space is 40
        // blocks (512-byte block size) and in total the GPT disk needs 40 + 1 + 33 * 2 = 107
        // blocks. The 106-block disk below is one block short.
        let (mut dev, mut gpt) = test_disk_and_gpt(&vec![0u8; 106 * 512]);
        let mut mbr_primary = include_bytes!("../test/gpt_test_1.bin")[..34 * 512].to_vec();
        assert!(block_on(dev.update_gpt(&mut mbr_primary, true, &mut gpt)).is_err());
    }

    #[test]
    fn test_update_gpt_buffer_truncated() {
        let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);

        // Less than 1 MBR block.
        assert_eq!(
            block_on(dev.update_gpt(&mut disk[..511], false, &mut gpt)),
            Err(Error::BufferTooSmall(Some(1024)))
        );

        // Less than MBR + GPT header.
        assert_eq!(
            block_on(dev.update_gpt(&mut disk[..1023], false, &mut gpt)),
            Err(Error::BufferTooSmall(Some(1024)))
        );

        // Less than MBR + GPT header + entries.
        assert_eq!(
            block_on(dev.update_gpt(&mut disk[..34 * 512 - 1], false, &mut gpt)),
            Err(Error::BufferTooSmall(Some(34 * 512)))
        );
    }

    #[test]
    fn test_update_gpt_check_header_fail() {
        let disk = include_bytes!("../test/gpt_test_1.bin");
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        let mut mbr_primary = disk[..34 * 512].to_vec();
        // Corrupts the first byte of the GPT header.
        mbr_primary[512] = !mbr_primary[512];
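        // The first header byte is the low byte (0x45, 'E') of the "EFI PART" magic
        // 0x5452415020494645; negating it yields the 0x...46BA value asserted below.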
        assert_eq!(
            block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)),
            Err(Error::GptError(GptError::IncorrectMagic(0x54524150204946BA)))
        );
    }

    #[test]
    fn test_update_gpt_check_entries_fail() {
        let disk = include_bytes!("../test/gpt_test_1.bin");
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        let mut mbr_primary = disk[..34 * 512].to_vec();
        // Corrupts the first byte of the entries.
        mbr_primary[1024] = !mbr_primary[1024];
        assert_eq!(
            block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)),
            Err(Error::GptError(GptError::IncorrectEntriesCrc))
        );
    }

    #[test]
    fn test_erase_gpt_no_gpt() {
        let (mut dev, mut gpt) = test_disk_and_gpt(&[0u8; 1024 * 1024]);
        block_on(dev.erase_gpt(&mut gpt)).unwrap();
    }

    #[test]
    fn test_erase_gpt() {
        let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
        block_on(dev.erase_gpt(&mut gpt)).unwrap();
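        // 33 blocks per GPT copy: one header block plus 32 blocks of partition entries.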
        const GPT_SECTOR: usize = 33 * 512;
        assert_eq!(dev.io().storage[512..][..GPT_SECTOR], vec![0u8; GPT_SECTOR]);
        assert_eq!(*dev.io().storage.last_chunk::<GPT_SECTOR>().unwrap(), *vec![0u8; GPT_SECTOR]);
        assert!(matches!(
            block_on(dev.sync_gpt(&mut gpt)).unwrap(),
            GptSyncResult::NoValidGpt { .. }
        ));
    }

    #[test]
    fn test_zero_partition_size() {
        let disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
        assert_eq!(builder.remove("boot_a"), Ok(true));
        assert_eq!(builder.remove("boot_b"), Ok(true));
        builder.add("boot_b", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(0)).unwrap();
        block_on(builder.persist()).unwrap();
        assert_eq!(gpt.partition_iter().unwrap().next().unwrap().size().unwrap(), 0);
    }

    #[test]
    fn test_sync_gpt_non_sorted_entries() {
        let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
        let (header, entries) = disk[512..].split_at_mut(512);
        let header = GptHeader::from_bytes_mut(header);
        let mut entries = Ref::<_, [GptEntry]>::new_slice(entries).unwrap();
        // Makes partition non-sorted.
        entries.swap(0, 1);
        header.update_entries_crc(entries.as_bytes());
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
        block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
    }

    #[test]
    fn test_gpt_builder_initialize_gpt_if_no_valid_gpt() {
        let (mut dev, mut gpt) = test_disk_and_gpt(vec![0u8; 1024 * 1024]);
        let (builder, valid) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
        assert!(!valid);
        block_on(builder.persist()).unwrap();
        // A new GPT is created.
        block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
        assert!(gpt.partition_iter().unwrap().next().is_none());
    }

    #[test]
    fn test_gpt_builder_remove_partition() {
        let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
        let (mut builder, valid) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
        assert!(valid);
        assert_eq!(builder.remove("boot_b"), Ok(true));
        assert_eq!(builder.remove("non-existent"), Ok(false));
        block_on(builder.persist()).unwrap();
        block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
        let part_iter = gpt.partition_iter().unwrap();
        assert_eq!(
            part_iter.map(|v| v.name().unwrap().into()).collect::<Vec<String>>(),
            ["boot_a"]
        );
    }

    #[test]
    fn test_gpt_builder_add_partition_find_first() {
        let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
        let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
        assert!(builder.remove("boot_a").unwrap());
        // Adds at the beginning.
        builder.add("new_0", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
        // Adds following "new_0".
        builder.add("new_1", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1)).unwrap();
        block_on(builder.persist()).unwrap();
        block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
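        // Block 34, the first usable block, is byte 17408. "new_1" asked for 1 byte but still
        // occupies a whole 512-byte block (18432..18944).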
        assert_eq!(gpt.find_partition("new_0").unwrap().absolute_range().unwrap(), (17408, 18432));
        assert_eq!(gpt.find_partition("new_1").unwrap().absolute_range().unwrap(), (18432, 18944));
        assert_eq!(gpt.find_partition("boot_b").unwrap().absolute_range().unwrap(), (25600, 37888));
    }

    #[test]
    fn test_gpt_builder_non_sorted_add_partition() {
        let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
        let (header, entries) = disk[512..].split_at_mut(512);
        let header = GptHeader::from_bytes_mut(header);
        let mut entries = Ref::<_, [GptEntry]>::new_slice(entries).unwrap();
        // Makes partition non-sorted before creating the test disk, so that the device actually
        // holds the modified image.
        entries.swap(0, 1);
        header.update_entries_crc(entries.as_bytes());
        let (mut dev, mut gpt) = test_disk_and_gpt(&disk);

        let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
        // Adds following boot_b.
        builder.add("new", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
        block_on(builder.persist()).unwrap();
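        // "boot_a" occupies blocks 34..=49 and "boot_b" blocks 50..=73 regardless of entry order,
        // so the new 1024-byte partition lands right after "boot_b" at byte 37888.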
        assert_eq!(gpt.find_partition("boot_a").unwrap().absolute_range().unwrap(), (17408, 25600));
        assert_eq!(gpt.find_partition("boot_b").unwrap().absolute_range().unwrap(), (25600, 37888));
        assert_eq!(gpt.find_partition("new").unwrap().absolute_range().unwrap(), (37888, 38912));
    }

    #[test]
    fn test_gpt_builder_add_partition_append() {
        let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
        let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
        assert!(builder.remove("boot_b").unwrap());
        // Adds following "boot_a".
        builder.add("new_0", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
        // Consumes the rest of the space.
        builder.add("new_1", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, None).unwrap();
        block_on(builder.persist()).unwrap();
        block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
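        // "new_1" (size None) extends to byte 48640, i.e. through the last usable block (94).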
        assert_eq!(gpt.find_partition("boot_a").unwrap().absolute_range().unwrap(), (17408, 25600));
        assert_eq!(gpt.find_partition("new_0").unwrap().absolute_range().unwrap(), (25600, 26624));
        assert_eq!(gpt.find_partition("new_1").unwrap().absolute_range().unwrap(), (26624, 48640));
    }

    #[test]
    fn test_gpt_builder_not_enough_resource() {
        // Create a Gpt that can only load 1 entry.
        let mut gpt = new_gpt_n::<1>();
        let mut dev = test_disk(vec![0u8; 64 * 1024]);
        let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
        builder.add("new_0", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
        assert!(builder.add("new_1", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, None).is_err());
    }
}