// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Support for parsing GUID partition tables.
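//!
//! A minimal usage sketch (not part of the original sources): `blk` is assumed to be an
//! already-initialised `VirtIOBlk` handle and `"example"` is a made-up partition name.
//!
//! ```ignore
//! // Look the partition up by its UTF-16 name in the GPT entry array.
//! let mut partition = Partition::get_by_name(blk, "example")?.expect("partition not found");
//! let mut sector = [0u8; Partitions::LBA_SIZE];
//! // Block indices are LBAs on the underlying device; start from the first one.
//! let first = *partition.indices().start();
//! partition.read_block(first, &mut sector)?;
//! ```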

use crate::helpers::ceiling_div;
use crate::virtio::pci::VirtIOBlk;
use core::cmp::min;
use core::fmt;
use core::mem::size_of;
use core::ops::RangeInclusive;
use core::slice;
use static_assertions::const_assert;
use static_assertions::const_assert_eq;
use uuid::Uuid;
use virtio_drivers::device::blk::SECTOR_SIZE;

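/// Error type returned by the GPT routines in this module.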
pub enum Error {
    /// VirtIO error during read operation.
    FailedRead(virtio_drivers::Error),
    /// VirtIO error during write operation.
    FailedWrite(virtio_drivers::Error),
    /// Invalid GPT header.
    InvalidHeader,
    /// Invalid partition block index.
    BlockOutsidePartition(usize),
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::FailedRead(e) => write!(f, "Failed to read from disk: {e}"),
            Self::FailedWrite(e) => write!(f, "Failed to write to disk: {e}"),
            Self::InvalidHeader => write!(f, "Found invalid GPT header"),
            Self::BlockOutsidePartition(i) => write!(f, "Accessed invalid block index {i}"),
        }
    }
}

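/// Result type for GPT operations, using this module's [`Error`].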
pub type Result<T> = core::result::Result<T, Error>;

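/// A single partition of the GPT: a contiguous, inclusive range of blocks on the underlying
/// device.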
pub struct Partition {
    partitions: Partitions,
    indices: RangeInclusive<usize>,
}

impl Partition {
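    /// Looks up the partition with the given name in the device's GPT, if it exists.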
    pub fn get_by_name(device: VirtIOBlk, name: &str) -> Result<Option<Self>> {
        Partitions::new(device)?.get_partition_by_name(name)
    }

    fn new(partitions: Partitions, entry: &Entry) -> Self {
        let first = entry.first_lba().try_into().unwrap();
        let last = entry.last_lba().try_into().unwrap();

        Self { partitions, indices: first..=last }
    }

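    /// Returns the range of block indices (LBAs on the underlying device) covered by this
    /// partition.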
    pub fn indices(&self) -> RangeInclusive<usize> {
        self.indices.clone()
    }

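    /// Reads the block at the given index, which must lie within the partition, into `blk`.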
    pub fn read_block(&mut self, index: usize, blk: &mut [u8]) -> Result<()> {
        let index = self.block_index(index).ok_or(Error::BlockOutsidePartition(index))?;
        self.partitions.read_block(index, blk)
    }

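    /// Writes `blk` to the block at the given index, which must lie within the partition.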
    pub fn write_block(&mut self, index: usize, blk: &[u8]) -> Result<()> {
        let index = self.block_index(index).ok_or(Error::BlockOutsidePartition(index))?;
        self.partitions.write_block(index, blk)
    }

    fn block_index(&self, index: usize) -> Option<usize> {
        if self.indices.contains(&index) {
            Some(index)
        } else {
            None
        }
    }
}

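/// Provides access to the GUID partition table of a block device.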
pub struct Partitions {
    device: VirtIOBlk,
    entries_count: usize,
}

impl Partitions {
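    /// Size of a logical block (LBA), assumed here to equal the VirtIO sector size.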
    pub const LBA_SIZE: usize = SECTOR_SIZE;

    fn new(mut device: VirtIOBlk) -> Result<Self> {
        let mut blk = [0; Self::LBA_SIZE];
        device.read_block(Header::LBA, &mut blk).map_err(Error::FailedRead)?;
        let (header_bytes, _) = blk.split_at(size_of::<Header>());
        let header = Header::from_bytes(header_bytes).ok_or(Error::InvalidHeader)?;
        let entries_count = usize::try_from(header.entries_count()).unwrap();

        Ok(Self { device, entries_count })
    }

    fn get_partition_by_name(mut self, name: &str) -> Result<Option<Partition>> {
        const_assert_eq!(Partitions::LBA_SIZE.rem_euclid(size_of::<Entry>()), 0);
        let entries_per_blk = Partitions::LBA_SIZE.checked_div(size_of::<Entry>()).unwrap();

        // Create a UTF-16 reference against which we'll compare partition names. Note that unlike
        // the C99 wcslen(), this comparison will cover bytes past the first L'\0' character.
        let mut needle = [0; Entry::NAME_SIZE / size_of::<u16>()];
        for (dest, src) in needle.iter_mut().zip(name.encode_utf16()) {
            *dest = src;
        }

        let mut blk = [0; Self::LBA_SIZE];
        let mut rem = self.entries_count;
        let num_blocks = ceiling_div(self.entries_count, entries_per_blk).unwrap();
        for i in Header::ENTRIES_LBA..Header::ENTRIES_LBA.checked_add(num_blocks).unwrap() {
            self.read_block(i, &mut blk)?;
            let entries = blk.as_ptr().cast::<Entry>();
            // SAFETY - blk is assumed to be properly aligned for Entry and its size is asserted
            // above. All potential values of the slice will produce valid Entry values.
            let entries = unsafe { slice::from_raw_parts(entries, min(rem, entries_per_blk)) };
            for entry in entries {
                let entry_name = entry.name;
                if entry_name == needle {
                    return Ok(Some(Partition::new(self, entry)));
                }
                rem -= 1;
            }
        }
        Ok(None)
    }

    fn read_block(&mut self, index: usize, blk: &mut [u8]) -> Result<()> {
        self.device.read_block(index, blk).map_err(Error::FailedRead)
    }

    fn write_block(&mut self, index: usize, blk: &[u8]) -> Result<()> {
        self.device.write_block(index, blk).map_err(Error::FailedWrite)
    }
}

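/// Logical block address, stored little-endian on disk.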
type Lba = u64;

/// Structure as defined in release 2.10 of the UEFI Specification (5.3.2 GPT Header).
#[repr(C, packed)]
struct Header {
    signature: u64,
    revision: u32,
    header_size: u32,
    header_crc32: u32,
    reserved0: u32,
    current_lba: Lba,
    backup_lba: Lba,
    first_lba: Lba,
    last_lba: Lba,
    disk_guid: Uuid,
    entries_lba: Lba,
    entries_count: u32,
    entry_size: u32,
    entries_crc32: u32,
}
const_assert!(size_of::<Header>() < Partitions::LBA_SIZE);

impl Header {
    const SIGNATURE: u64 = u64::from_le_bytes(*b"EFI PART");
    const REVISION_1_0: u32 = 1 << 16;
    const LBA: usize = 1;
    const ENTRIES_LBA: usize = 2;

    fn from_bytes(bytes: &[u8]) -> Option<&Self> {
        let bytes = bytes.get(..size_of::<Self>())?;
        // SAFETY - We assume that bytes is properly aligned for Header and have verified above
        // that it holds enough bytes. All potential values of the slice will produce a valid
        // Header.
        let header = unsafe { &*bytes.as_ptr().cast::<Self>() };

        if header.is_valid() {
            Some(header)
        } else {
            None
        }
    }

    fn is_valid(&self) -> bool {
        self.signature() == Self::SIGNATURE
            && self.header_size() == size_of::<Self>().try_into().unwrap()
            && self.revision() == Self::REVISION_1_0
            && self.entry_size() == size_of::<Entry>().try_into().unwrap()
            && self.current_lba() == Self::LBA.try_into().unwrap()
            && self.entries_lba() == Self::ENTRIES_LBA.try_into().unwrap()
    }

    fn signature(&self) -> u64 {
        u64::from_le(self.signature)
    }

    fn entries_count(&self) -> u32 {
        u32::from_le(self.entries_count)
    }

    fn header_size(&self) -> u32 {
        u32::from_le(self.header_size)
    }

    fn revision(&self) -> u32 {
        u32::from_le(self.revision)
    }

    fn entry_size(&self) -> u32 {
        u32::from_le(self.entry_size)
    }

    fn entries_lba(&self) -> Lba {
        Lba::from_le(self.entries_lba)
    }

    fn current_lba(&self) -> Lba {
        Lba::from_le(self.current_lba)
    }
}

/// Structure as defined in release 2.10 of the UEFI Specification (5.3.3 GPT Partition Entry
/// Array).
#[repr(C, packed)]
struct Entry {
    type_guid: Uuid,
    guid: Uuid,
    first_lba: Lba,
    last_lba: Lba,
    flags: u64,
    name: [u16; Entry::NAME_SIZE / size_of::<u16>()], // UTF-16
}

impl Entry {
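    /// Size in bytes of the partition name field (36 UTF-16 code units).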
    const NAME_SIZE: usize = 72;

    fn first_lba(&self) -> Lba {
        Lba::from_le(self.first_lba)
    }

    fn last_lba(&self) -> Lba {
        Lba::from_le(self.last_lba)
    }
}