1 // Copyright 2024, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 use crate::utils::efi_to_zbi_mem_range_type;
16 #[allow(unused_imports)]
17 use crate::{
18 efi_blocks::{find_block_devices, EfiGblDisk},
19 fastboot::fastboot,
20 ops::Ops,
21 utils::get_efi_mem_attr,
22 };
23 use efi::{EfiEntry, EfiMemoryAttributesTable, EfiMemoryMap};
24 use efi_types::{
25 EfiMemoryAttributesTableHeader, EfiMemoryDescriptor, EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME,
26 };
27 use liberror::Error;
28 use liberror::Error::BufferTooSmall;
29 use libgbl::{
30 constants::PAGE_SIZE as PAGE_SIZE_USIZE,
31 fuchsia_boot::{zircon_check_enter_fastboot, zircon_load_verify_abr, zircon_part_name},
32 gbl_print, gbl_println,
33 ops::ImageBuffer,
34 partition::check_part_unique,
35 GblOps,
36 IntegrationError::UnificationError,
37 Result,
38 };
39 use safemath::SafeNum;
40 use zbi::{zbi_format::zbi_mem_range_t, ZbiContainer, ZbiFlags, ZbiType};
41 use zerocopy::{Ref, SplitByteSliceMut};
42
43 const PAGE_SIZE: u64 = PAGE_SIZE_USIZE as u64;
44
45 /// Check if the disk GPT layout is a Fuchsia device layout.
is_fuchsia_gpt(disks: &[EfiGblDisk]) -> Result<()>46 pub fn is_fuchsia_gpt(disks: &[EfiGblDisk]) -> Result<()> {
47 let partitions: &[&[&str]] = &[
48 &["zircon_a", "zircon-a"],
49 &["zircon_b", "zircon-b"],
50 &["zircon_r", "zircon-r"],
51 &["vbmeta_a"],
52 &["vbmeta_b"],
53 &["vbmeta_r"],
54 &["misc", "durable_boot"],
55 ];
56 if !partitions
57 .iter()
58 .all(|&partition| partition.iter().any(|v| check_part_unique(&disks[..], *v).is_ok()))
59 {
60 return Err(Error::NotFound.into());
61 }
62
63 Ok(())
64 }
65
66 /// Loads and verifies Fuchsia according to A/B/R.
67 ///
68 /// On success, returns the kernel and zbi_item buffer.
efi_fuchsia_load(ops: &mut Ops) -> Result<(ImageBuffer<'static>, ImageBuffer<'static>)>69 pub fn efi_fuchsia_load(ops: &mut Ops) -> Result<(ImageBuffer<'static>, ImageBuffer<'static>)> {
70 gbl_println!(ops, "Try booting as Fuchsia/Zircon");
71 // Checks whether to enter fastboot mode.
72 if zircon_check_enter_fastboot(ops) {
73 fastboot(ops, &mut [])?;
74 }
75 let (zbi_items_buffer, kernel_buffer, slot) = zircon_load_verify_abr(ops)?;
76 gbl_println!(ops, "Booting from slot: {}", zircon_part_name(Some(slot)));
77 Ok((kernel_buffer, zbi_items_buffer))
78 }
79
80 /// Exits boot services and boots loaded fuchsia images.
/// Exits boot services and boots loaded fuchsia images.
///
/// The parameters carry leading underscores because each is only used on a
/// subset of the `cfg`-gated architecture paths below.
///
/// On aarch64/x86 this hands control to the kernel via an unsafe jump and is
/// not expected to return on success; on riscv64 it is unimplemented.
pub fn efi_fuchsia_boot(
    _efi_entry: EfiEntry,
    mut _kernel_buffer: ImageBuffer,
    mut _zbi_items: ImageBuffer,
) -> Result<()> {
    // Work only with the used portion of the ZBI items buffer.
    let _zbi_items = _zbi_items.used_mut();
    #[cfg(target_arch = "aarch64")]
    {
        // Uses the unused buffer for `exit_boot_services` to store the output memory map.
        // The map is not used for now. We currently rely on UEFI firmware to pass memory map via
        // a raw zbi blob in device tree. Long term we want to support adding from EFI memory maps
        // if none is provided.
        let item_size = zbi::ZbiContainer::parse(&mut _zbi_items[..])?.container_size()?;
        let (_, remains) = _zbi_items.split_at_mut(item_size);
        let _ = efi::exit_boot_services(_efi_entry, remains).unwrap();
        // SAFETY: The kernel has passed libavb verification or device is unlocked, in which case we
        // assume the caller has addressed all safety and security concerns.
        unsafe { boot::aarch64::jump_zircon_el2_or_lower(_kernel_buffer.used_mut(), _zbi_items) };
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    {
        // Scratch buffer for the memory map returned by `exit_boot_services`.
        // NOTE(review): 16 KiB (32 KiB / 2) — the sizing rationale is not
        // visible here; confirm it is large enough for the firmware's map.
        const BUFFER_SIZE: usize = 32 * 1024 / 2;
        let mut mem_map_buf = [0u8; BUFFER_SIZE];
        let mut zbi_items = zbi::ZbiContainer::parse(&mut _zbi_items[..])?;
        // The attributes table must be fetched while boot services are still up.
        let efi_memory_attribute_table =
            get_efi_mem_attr(&_efi_entry).ok_or(Error::InvalidInput)?;

        // `exit_boot_service` returns the EFI memory map that is used to derive and append
        // MEM_CONFIG items.
        let efi_memory_map = efi::exit_boot_services(_efi_entry, &mut mem_map_buf).unwrap();

        add_memory_items(&efi_memory_map, &efi_memory_attribute_table, &mut zbi_items)?;

        // SAFETY: The kernel has passed libavb verification or device is unlocked, in which case we
        // assume the caller has addressed all safety and security concerns.
        unsafe { boot::x86::zbi_boot(_kernel_buffer.used_mut(), _zbi_items) };
    }

    #[cfg(target_arch = "riscv64")]
    {
        unimplemented!();
    }
}
125
126 // This function must not use allocation
127 #[allow(unused)]
add_memory_items<B>( efi_memory_map: &EfiMemoryMap, efi_memory_attribute_table: &EfiMemoryAttributesTable, zbi_items: &mut ZbiContainer<B>, ) -> Result<()> where B: SplitByteSliceMut + PartialEq,128 fn add_memory_items<B>(
129 efi_memory_map: &EfiMemoryMap,
130 efi_memory_attribute_table: &EfiMemoryAttributesTable,
131 zbi_items: &mut ZbiContainer<B>,
132 ) -> Result<()>
133 where
134 B: SplitByteSliceMut + PartialEq,
135 {
136 generate_efi_memory_attributes_table_item(
137 efi_memory_map,
138 efi_memory_attribute_table,
139 zbi_items,
140 )?;
141
142 generate_mem_config_item(efi_memory_map, zbi_items)?;
143
144 Ok(())
145 }
146
/// Appends a `ZbiType::EfiMemoryAttributesTable` item to `zbi_items`.
///
/// The item payload mirrors the UEFI memory attributes table layout: one
/// `EfiMemoryAttributesTableHeader` followed by `EfiMemoryDescriptor`
/// entries. For every runtime region of the EFI memory map, descriptors are
/// copied from the EMAT where it covers the region, and synthesized for any
/// leading/trailing gaps the EMAT does not cover, so the whole runtime range
/// ends up described.
fn generate_efi_memory_attributes_table_item<'b, B>(
    efi_memory_map: &EfiMemoryMap<'b>,
    efi_memory_attribute_table: &EfiMemoryAttributesTable,
    zbi_items: &mut ZbiContainer<B>,
) -> Result<()>
where
    B: SplitByteSliceMut + PartialEq,
{
    // Reserve the container's next payload area; the header is carved off the
    // front and descriptors are carved off `tail` as they are generated.
    let payload = zbi_items.get_next_payload()?;
    let provided_payload_size = payload.len();
    let (mut header, mut tail) =
        Ref::<&mut [u8], EfiMemoryAttributesTableHeader>::new_from_prefix(payload)
            .ok_or(Error::BadBufferSize)?;

    for efi_memory_desc in efi_memory_map.into_iter() {
        // Only runtime regions belong in the attributes table.
        if efi_memory_desc.attributes & EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME == 0 {
            continue;
        }

        // `base`/`size` track the not-yet-emitted remainder of this region.
        let mut base = efi_memory_desc.physical_start;
        let mut size: u64 = (SafeNum::from(efi_memory_desc.number_of_pages) * PAGE_SIZE)
            .try_into()
            .map_err(Error::from)?;

        // This EMAT entry is either a sub-region or a full copy of the memory map region, per
        // EFI 2.10 4.6.4: "Additionally, every memory region described by a Descriptor in
        // EFI_MEMORY_ATTRIBUTES_TABLE must be a sub-region of, or equal to, a descriptor in the
        // table produced by GetMemoryMap()."
        //
        // This means that we do not have to consider the case where the EMAT entry only overlaps
        // the end of the memory map entry.
        //
        // EMAT items are ordered by physical address, so once we go past |base| we can quit the
        // loop.
        //
        // NOTE: the `move` closures capture `base`/`size` by value at closure
        // creation. Both branches below keep `base + size` invariant, so the
        // `take_while` bound stays equal to this region's fixed end address
        // even though `base`/`size` themselves are updated in the loop body.
        for emat_item in efi_memory_attribute_table
            .into_iter()
            .skip_while(move |item| item.physical_start < base)
            .take_while(move |item| item.physical_start < base + size)
        {
            if emat_item.physical_start > base {
                // Create a region for [base ... emat_item->PhysicalStart), because that region is
                // not covered by the EMAT.
                let mut generated_item;
                (generated_item, tail) = Ref::<_, EfiMemoryDescriptor>::new_from_prefix(tail)
                    .ok_or(UnificationError(BufferTooSmall(Some(
                        size_of::<EfiMemoryDescriptor>(),
                    ))))?;

                generated_item.physical_start = base;
                generated_item.number_of_pages = (emat_item.physical_start - base) / PAGE_SIZE;
                generated_item.virtual_start = 0;
                generated_item.attributes = EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME;
                // NOTE(review): the gap region inherits the *following* EMAT
                // entry's memory_type rather than `efi_memory_desc.memory_type`
                // — confirm this is intended.
                generated_item.memory_type = emat_item.memory_type;

                // Adjust base and size forward.
                size -= emat_item.physical_start - base;
                base = emat_item.physical_start;
            } else {
                // emat_item.physical_start == base
                // Create a region for [base ... emat_item->NumberOfPages * PAGE_SIZE)
                let mut generated_item;
                (generated_item, tail) = Ref::<_, EfiMemoryDescriptor>::new_from_prefix(tail)
                    .ok_or(UnificationError(BufferTooSmall(Some(
                        size_of::<EfiMemoryDescriptor>(),
                    ))))?;
                // The EMAT entry already describes [base, base + pages), so it
                // is copied verbatim.
                *generated_item = *emat_item;

                // Adjust base and size forward.
                base += emat_item.number_of_pages * PAGE_SIZE;
                size -= emat_item.number_of_pages * PAGE_SIZE;
            }
        }

        // Any remainder past the last EMAT entry gets a synthesized
        // descriptor carrying the memory map region's own type.
        if size != 0 {
            let mut generated_item;
            (generated_item, tail) = Ref::<_, EfiMemoryDescriptor>::new_from_prefix(tail)
                .ok_or(UnificationError(BufferTooSmall(Some(size_of::<EfiMemoryDescriptor>()))))?;

            generated_item.physical_start = base;
            generated_item.number_of_pages = size / PAGE_SIZE;
            generated_item.virtual_start = 0;
            generated_item.attributes = EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME;
            generated_item.memory_type = efi_memory_desc.memory_type;
        }
    }

    // Fill in the header now that the descriptor count is known, then commit
    // the item; `used_payload` is header + all emitted descriptors.
    let used_payload = provided_payload_size - tail.len();
    header.descriptor_size = size_of::<EfiMemoryDescriptor>().try_into().map_err(Error::from)?;
    header.number_of_entries =
        (used_payload / size_of::<EfiMemoryDescriptor>()).try_into().unwrap();
    header.reserved = 0;
    header.version = 1;

    zbi_items.create_entry(
        ZbiType::EfiMemoryAttributesTable,
        0,
        ZbiFlags::default(),
        used_payload,
    )?;

    Ok(())
}
249
generate_mem_config_item<'b, B>( efi_memory_map: &EfiMemoryMap<'b>, zbi_items: &mut ZbiContainer<B>, ) -> Result<()> where B: SplitByteSliceMut + PartialEq,250 fn generate_mem_config_item<'b, B>(
251 efi_memory_map: &EfiMemoryMap<'b>,
252 zbi_items: &mut ZbiContainer<B>,
253 ) -> Result<()>
254 where
255 B: SplitByteSliceMut + PartialEq,
256 {
257 let mut tail = zbi_items.get_next_payload()?;
258 let provided_payload_size = tail.len();
259
260 for efi_desc in efi_memory_map.into_iter() {
261 let mut zbi_mem_range: Ref<&mut [u8], zbi_mem_range_t>;
262 (zbi_mem_range, tail) = Ref::new_from_prefix(tail)
263 .ok_or(UnificationError(BufferTooSmall(Some(size_of::<zbi_mem_range_t>()))))?;
264 zbi_mem_range.paddr = efi_desc.physical_start;
265 zbi_mem_range.length = efi_desc.number_of_pages * PAGE_SIZE;
266 zbi_mem_range.type_ = efi_to_zbi_mem_range_type(efi_desc.memory_type);
267 zbi_mem_range.reserved = 0;
268 }
269
270 let used_payload = provided_payload_size - tail.len();
271 zbi_items.create_entry(ZbiType::MemConfig, 0, ZbiFlags::default(), used_payload)?;
272
273 Ok(())
274 }
275